Merge branch 'main' of https://github.com/prathameshks/FoodAnalyzer-API
- .gitattributes +1 -0
- .gitignore +5 -2
- Dockerfile +22 -0
- README.md +11 -0
- app.py +1 -0
- db/database.py +1 -9
- db/repositories.py +18 -8
- env.py +80 -0
- interfaces/productModels.py +5 -5
- main.py +16 -5
- requirements.txt +8 -7
- routers/analysis.py +22 -9
- routers/product.py +169 -127
- services/auth_service.py +100 -5
- services/ingredientFinderAgent.py +28 -12
- services/productAnalyzerAgent.py +6 -9
- uploaded_images/detected_Snack_0.13_db8318a668504073ad5fd0677187d305.jpg +0 -0
- utils/agent_tools.py +2 -14
- utils/analyze.py +56 -0
- utils/db_utils.py +53 -11
- utils/external_api_utils.py +1 -5
- utils/image_processing_utils.py +1 -5
- utils/ingredient_utils.py +42 -38
- utils/vuforia_utils.py +93 -0
.gitattributes
ADDED
@@ -0,0 +1 @@
+models/mobile_sam.pt filter=lfs diff=lfs merge=lfs -text
.gitignore
CHANGED
@@ -1,7 +1,5 @@
 # Ignore sensitive data
 .env
-alembic.ini
-

 debug*

@@ -23,5 +21,10 @@ env/
 .idea/
 *.swp

+# Ignore uploads directory
 uploaded_images/
+
+# But track this specific file
+!uploaded_images/detected_Snack_0.13_db8318a668504073ad5fd0677187d305.jpg
+
 *.pt
Dockerfile
ADDED
@@ -0,0 +1,22 @@
+# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+# you will also find guides on how best to write your Dockerfile
+
+FROM python:3.12
+
+RUN useradd -m -u 1000 user
+USER user
+ENV PATH="/home/user/.local/bin:$PATH"
+
+WORKDIR /app
+
+COPY --chown=user ./requirements.txt requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+# Fix: Copy files to /app instead of /main to match WORKDIR
+COPY --chown=user . /app
+
+# Make sure models directory exists
+RUN mkdir -p /app/models
+
+# Run the app using port 7860 (standard for HF Spaces)
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
README.md
CHANGED
@@ -1,3 +1,14 @@
+---
+license: mit
+title: Food Analyzer API
+sdk: docker
+emoji: 🔥
+colorFrom: red
+colorTo: purple
+pinned: true
+short_description: Fast API
+---
+
 # FoodAnalyzer-API

 ## Installation and Setup
app.py
ADDED
@@ -0,0 +1 @@
+from main import app
db/database.py
CHANGED
@@ -2,16 +2,8 @@ import os
 from sqlalchemy import create_engine
 from sqlalchemy.ext.declarative import declarative_base
 from sqlalchemy.orm import sessionmaker
-from dotenv import load_dotenv

-
-load_dotenv()
-
-# Get database URL from environment variable
-DATABASE_URL = os.getenv(
-    "DATABASE_URL",
-    "postgresql://postgres:password@localhost:5432/food_ingredients"
-)
+from env import DATABASE_URL

 # Create engine
 engine = create_engine(DATABASE_URL)
db/repositories.py
CHANGED
@@ -1,3 +1,4 @@
+import json
 from sqlalchemy.orm import Session
 from sqlalchemy import cast, or_, String
 from sqlalchemy.dialects.postgresql import JSONB
@@ -38,15 +39,24 @@ class IngredientRepository:
         return self.db.query(models.Ingredient).offset(skip).limit(limit).all()

     def create_ingredient(self, ingredient_data: IngredientAnalysisResult):
+        # convert the json data to string using json.dumps
+        name = ingredient_data.name
+        alternate_names = json.dumps(ingredient_data.alternate_names)
+        safety_rating = ingredient_data.safety_rating
+        description = ingredient_data.description
+        health_effects = json.dumps(ingredient_data.health_effects)
+        allergic_info = json.dumps(ingredient_data.allergic_info) if ingredient_data.allergic_info else None
+        diet_type = ingredient_data.diet_type
+
         # Create ingredient record
         db_ingredient = models.Ingredient(
-            name=ingredient_data.name,
-            alternate_names=ingredient_data.alternate_names,
-            safety_rating=ingredient_data.safety_rating,
-            description=ingredient_data.description,
-            health_effects=ingredient_data.health_effects,
-            allergic_info=ingredient_data.allergic_info,
-            diet_type=ingredient_data.diet_type
+            name=name,
+            alternate_names=alternate_names,
+            safety_rating=safety_rating,
+            description=description,
+            health_effects=health_effects,
+            allergic_info=allergic_info,
+            diet_type=diet_type
         )
         self.db.add(db_ingredient)
         self.db.commit()
@@ -102,7 +112,7 @@ class ProductRepository:

     def add_product(self, product_create: ProductCreate):
         db_product = self._create_product(product_create)
-        self._store_analysis_data(db_product, product_create.ingredients_analysis)
+        # self._store_analysis_data(db_product, product_create.ingredients_analysis)
         return db_product

     def _create_product(self, product_create: ProductCreate):
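Note on the repositories change: list- and dict-valued fields are now serialized with json.dumps before they reach the Ingredient columns, so whatever reads those columns back needs the inverse json.loads step. A minimal round-trip sketch (the read helper is hypothetical; this commit only shows the write path):

import json

# Write path, mirroring create_ingredient above
alternate_names_raw = json.dumps(["E300", "ascorbic acid"])

# Hypothetical read path: invert the serialization when loading a row
def parse_json_column(raw):
    return json.loads(raw) if raw else None

assert parse_json_column(alternate_names_raw) == ["E300", "ascorbic acid"]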
env.py
ADDED
@@ -0,0 +1,80 @@
+import os
+from dotenv import load_dotenv
+
+# Load environment variables from .env file
+load_dotenv()
+
+
+# Environment variables for FoodAnalyzer-API
+PORT = int(os.getenv("PORT", 8000))
+UPLOADED_IMAGES_DIR = "uploaded_images"
+if not os.path.exists(UPLOADED_IMAGES_DIR):
+    os.makedirs(UPLOADED_IMAGES_DIR)
+
+# JWT Secret Key
+SECRET_KEY = os.getenv("SECRET_KEY", "09d8f7a6b5c4e3d2f1a0b9c8d7e6f5a4")
+ALGORITHM = os.getenv("ALGORITHM", "HS256")
+ACCESS_TOKEN_EXPIRE_MINUTES = int(os.getenv("ACCESS_TOKEN_EXPIRE_MINUTES", 30))
+
+# Hugging Face Transformers API key not required
+# HUGGING_FACE_API_KEY = os.getenv("HUGGING_FACE_API_KEY", None)
+# OpenAI API key not required
+# OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)
+
+# API keys and model names for different LLMs here
+# for google ai studio
+LLM_API_KEY = os.getenv("LLM_API_KEY", None)
+LLM_MODEL_NAME = os.getenv("LLM_MODEL_NAME", "gemini-2.0-flash")
+
+GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY", None)
+GOOGLE_CSE_ID = os.getenv("GOOGLE_CSE_ID", None)
+
+USDA_API_KEY = os.getenv("USDA_API_KEY", "DEMO_KEY")
+
+# pg db url
+DATABASE_URL = os.getenv("DATABASE_URL", None)
+
+# Vuforia keys
+VUFORIA_SERVER_ACCESS_KEY = os.getenv("VUFORIA_SERVER_ACCESS_KEY", None)
+VUFORIA_SERVER_SECRET_KEY = os.getenv("VUFORIA_SERVER_SECRET_KEY", None)
+VUFORIA_TARGET_DATABASE_NAME = os.getenv("VUFORIA_TARGET_DATABASE_NAME", "FoodAnalyzer_BE_PROJ")
+VUFORIA_TARGET_DATABASE_ID = os.getenv("VUFORIA_TARGET_DATABASE_ID", "FoodAnalyzer_BE_PROJ")
+
+# langsmith keys (optional)
+LANGSMITH_TRACING = os.getenv("LANGSMITH_TRACING", True)
+LANGSMITH_ENDPOINT = os.getenv("LANGSMITH_ENDPOINT", "https://api.smith.langchain.com")
+LANGSMITH_API_KEY = os.getenv("LANGSMITH_API_KEY", None)
+LANGSMITH_PROJECT = os.getenv("LANGSMITH_PROJECT", None)
+
+# app settings
+PARALLEL_RATE_LIMIT = int(os.getenv("PARALLEL_RATE_LIMIT", 10))
+
+# Rate limiting configuration in seconds
+PUBCHEM_TIMEOUT = int(os.getenv("PUBCHEM_TIMEOUT", 2))
+PUBCHEM_MAX_RETRIES = int(os.getenv("PUBCHEM_MAX_RETRIES", 2))
+
+# Delay in seconds
+DUCKDUCKGO_RATE_LIMIT_DELAY = int(os.getenv("DUCKDUCKGO_RATE_LIMIT_DELAY", 2))
+DUCKDUCKGO_MAX_RETRIES = int(os.getenv("DUCKDUCKGO_MAX_RETRIES", 2))
+
+# fake response for testing
+SEND_FAKE_TARGET = os.getenv("SEND_FAKE_TARGET", False) == "true"
+FAKE_TARGET_IMAGE_NAME = os.getenv("FAKE_TARGET_IMAGE_NAME", "detected_Snack_0.13_db8318a668504073ad5fd0677187d305.jpg")
+
+# Define required environment variables and raise an error if any is not set
+required_env_vars = {
+    "LLM_API_KEY": LLM_API_KEY,
+    "GOOGLE_API_KEY": GOOGLE_API_KEY,
+    "GOOGLE_CSE_ID": GOOGLE_CSE_ID,
+    "USDA_API_KEY": USDA_API_KEY,
+    "DATABASE_URL": DATABASE_URL,
+    "VUFORIA_SERVER_ACCESS_KEY": VUFORIA_SERVER_ACCESS_KEY,
+    "VUFORIA_SERVER_SECRET_KEY": VUFORIA_SERVER_SECRET_KEY,
+    "VUFORIA_TARGET_DATABASE_NAME": VUFORIA_TARGET_DATABASE_NAME,
+    "VUFORIA_TARGET_DATABASE_ID": VUFORIA_TARGET_DATABASE_ID,
+}
+
+# Check that all required environment variables are set
+for var in required_env_vars.keys():
+    if required_env_vars[var] is None:
+        raise ValueError(f"Environment variable {var} is not set. Please set it in the .env file.")
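The new env.py centralizes configuration: modules import typed, validated constants instead of each calling load_dotenv() and os.getenv(), and the required_env_vars loop makes a missing key fail at import time rather than mid-request. A minimal sketch of the consuming pattern used throughout this commit:

# Before: per-module load_dotenv() plus os.getenv("DATABASE_URL", ...)
# After: one import; env.py raises ValueError at startup if DATABASE_URL is unset
from env import DATABASE_URL, PARALLEL_RATE_LIMIT

def describe_config():
    # Values arrive already typed (PARALLEL_RATE_LIMIT is an int) and validated
    return f"db={DATABASE_URL!r}, rate_limit={PARALLEL_RATE_LIMIT}"

One quirk worth noting: SEND_FAKE_TARGET compares the raw os.getenv result to "true", so the flag is enabled only when the variable is set to exactly that lowercase string.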
interfaces/productModels.py
CHANGED
@@ -8,17 +8,17 @@ class ProductIngredientsRequest(BaseModel):

 class ProductCreate(BaseModel):
     product_name: str
-    ingredients: List[str]
+    ingredients: List[str]|str
     overall_safety_score: int
     suitable_diet_types: str
-    allergy_warnings: List[str]
+    allergy_warnings: List[str]|str
     usage_recommendations: str
-    health_insights: Dict[str, List[str]]
-    ingredient_interactions: List[str]
+    health_insights: Dict[str, List[str]]|str
+    ingredient_interactions: List[str]|str
     key_takeaway: str
     ingredients_count: int
     user_id: int
     timestamp: datetime
-    ingredient_ids: List[int]
+    ingredient_ids: List[int]|str
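The fields widened to List[...]|str let the rewritten create endpoint (routers/product.py below) pass pre-serialized json.dumps strings without failing validation, while structured callers can keep passing lists and dicts. A self-contained sketch of what now validates (the model is a stand-in mirroring two of the widened fields, not the project's class):

import json
from typing import Dict, List
from pydantic import BaseModel

class ProductCreateSketch(BaseModel):  # stand-in, not the real ProductCreate
    ingredients: List[str] | str
    health_insights: Dict[str, List[str]] | str

# Both the structured form and the serialized-string form are accepted:
ProductCreateSketch(ingredients=["salt"], health_insights={"benefits": []})
ProductCreateSketch(ingredients=json.dumps(["salt"]),
                    health_insights=json.dumps({"benefits": []}))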
|
main.py
CHANGED
@@ -6,25 +6,36 @@ from routers.auth import router as auth_router
 from routers.analysis import router as analysis_router
 from routers.history import router as history_router
 from routers.product import router as product_router
-from dotenv import load_dotenv
 import os
 import uvicorn
 from pathlib import Path
+import tensorflow as tf
+import tensorflow_hub as hub
+from env import PORT

-load_dotenv()
-# Load environment variables from .env file
-PORT = os.getenv("PORT", 8000)

 # Define the templates directory
 templates = Jinja2Templates(directory="templates")

 app = FastAPI()

+# Suppress TensorFlow warnings
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # 0=all, 1=no INFO, 2=no WARNING, 3=no ERROR
+
+# Store the model as a state variable in the app
+@app.on_event("startup")
+async def startup_event():
+    # Load model once during startup
+    print("Loading TensorFlow model...")
+    app.state.detector = hub.load("https://tfhub.dev/google/openimages_v4/ssd/mobilenet_v2/1").signatures['default']
+    print("TensorFlow model loaded successfully!")
+
 @app.get("/")
 def read_root():
     return RedirectResponse("/api")

 # print every request data for request using middleware
+
 @app.middleware("http")
 async def log_requests(request: Request, call_next):
     # Store the body content before sending to the next handler
@@ -33,7 +44,7 @@ async def log_requests(request: Request, call_next):
     request._body = body_content
     response = await call_next(request)
     print(f"Request: {request.method} {request.url}")
-
+    print(f"Data: {body_content}"[:100])
     print(f"Headers: {request.headers}")
     return response
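Loading the TensorFlow Hub detector once in the startup hook and stashing it on app.state replaces the module-level global that routers/product.py used to keep; handlers then reach the shared model through the request object. A minimal runnable sketch of the pattern (a placeholder object stands in for the real hub.load call):

from fastapi import FastAPI, Request

app = FastAPI()

@app.on_event("startup")
async def load_model():
    # Stands in for hub.load(...).signatures['default'] from the diff above
    app.state.detector = object()

@app.get("/detector-ready")
async def detector_ready(request: Request):
    # Handlers read the shared model from app state instead of a global
    return {"loaded": hasattr(request.app.state, "detector")}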
requirements.txt
CHANGED
@@ -2,14 +2,15 @@
 fastapi==0.115.12
 uvicorn==0.34.0
 python-multipart==0.0.20
-jinja2
+jinja2==3.1.6
+aiohttp==3.11.16

 # Database
 sqlalchemy==2.0.40
 alembic==1.15.2
 psycopg2-binary==2.9.10
-mysqlclient
-pymysql
+mysqlclient==2.2.7
+pymysql==1.1.1

 # Authentication
 python-jose==3.3.0
@@ -19,16 +20,16 @@ bcrypt==4.0.1  # Using 4.0.1 to avoid the attribute error
 # AI & ML
 langchain==0.3.23
 langchain-community==0.3.21
-langchain-google-genai
+langchain-google-genai==2.0.10
 langchain-openai==0.3.12
-google-generativeai
+google-generativeai==0.8.4
 openai==1.73.0
 langgraph==0.3.27
 langsmith==0.3.30

 # Computer Vision
-tensorflow
-tensorflow_hub
+tensorflow==2.19.0
+tensorflow_hub==0.16.1
 pillow==11.1.0
 opencv-python==4.11.0.86
 pytesseract==0.3.13
routers/analysis.py
CHANGED
@@ -8,7 +8,7 @@ import pytz
 from sqlalchemy.orm import Session
 from typing import List, Dict, Any
 from db.models import User, Ingredient
-from interfaces.ingredientModels import IngredientAnalysisResult
+from interfaces.ingredientModels import IngredientAnalysisResult, IngredientRequest
 from interfaces.productModels import ProductIngredientsRequest
 from logger_manager import log_info, log_error
 from db.database import get_db,SessionLocal
@@ -94,17 +94,30 @@ async def process_ingredients_endpoint(product_ingredient: ProductIngredientsReq

     # Step 2: Generate aggregate analysis with product analyzer agent

+    # Safely get user preferences, handling the case where the preferences table doesn't exist
+    user_preferences = {}
+    if current_user:
+        user_preferences["user_id"] = current_user.id
+        try:
+            # Only try to access preferences if the relationship exists
+            if hasattr(current_user, 'preferences') and current_user.preferences:
+                user_preferences["allergies"] = current_user.preferences[0].allergens
+                user_preferences["dietary_restrictions"] = current_user.preferences[0].dietary_restrictions
+            else:
+                user_preferences["allergies"] = None
+                user_preferences["dietary_restrictions"] = None
+        except Exception as e:
+            log_error(f"Error accessing user preferences: {e}", e)
+            user_preferences["allergies"] = None
+            user_preferences["dietary_restrictions"] = None
+
     product_analysis = await analyze_product_ingredients(
         ingredients_data=ingredient_results,
-        user_preferences={
-            "user_id": current_user.id,
-            "allergies": current_user.preferences[0].allergens if current_user.preferences else None,
-            "dietary_restrictions": current_user.preferences[0].dietary_restrictions if current_user.preferences else None
-        } if current_user else {}
+        user_preferences=user_preferences
     )

     # print("Product analysis result:", product_analysis)

     # Step 3: Prepare final response
     result = {
         "ingredients_count": len(ingredients),
@@ -139,5 +152,5 @@ async def get_analysis_by_marker_id(target_id: str, db: Session = Depends(get_db
         return product_data

     except Exception as e:
         log_error(f"Error in get_analysis_by_marker_id: {str(e)}", e)
         raise HTTPException(status_code=500, detail="Internal Server Error")
routers/product.py
CHANGED
@@ -1,9 +1,11 @@
+from datetime import datetime
 import io
 from fastapi import APIRouter, Request, HTTPException, File, UploadFile, Form
 from fastapi.responses import JSONResponse, FileResponse
 from typing import List, Dict, Any
 from logger_manager import log_debug, log_info, log_error
 import os
+from services.auth_service import get_current_user
 from services.product_service import ProductService
 from db.models import Marker, Product
 from sqlalchemy.orm import Session
@@ -20,47 +22,29 @@ from db.database import get_db
 from fastapi import Depends
 from db.repositories import ProductRepository, IngredientRepository

-from dotenv import load_dotenv

 from services.ingredients import IngredientService
 from services.productAnalyzerAgent import analyze_product_ingredients
+from utils.analyze import process_product_ingredients
+from utils.db_utils import add_product_to_database
 from utils.fetch_data import fetch_product_data_from_api
+import uuid
+import json

-UPLOADED_IMAGES_DIR = "uploaded_images"
-if not os.path.exists(UPLOADED_IMAGES_DIR):
-    os.makedirs(UPLOADED_IMAGES_DIR)
-
-
-# TensorFlow model caching
-detector = None
-
-
-def load_detector():
-    global detector
-    if detector is None:
-        detector = hub.load("https://tfhub.dev/google/openimages_v4/ssd/mobilenet_v2/1").signatures['default']
-
-VUFORIA_SERVER_ACCESS_KEY = os.getenv("VUFORIA_SERVER_ACCESS_KEY")
-VUFORIA_SERVER_SECRET_KEY = os.getenv("VUFORIA_SERVER_SECRET_KEY")
-VUFORIA_TARGET_DATABASE_NAME = os.getenv("VUFORIA_TARGET_DATABASE_NAME")
-VUFORIA_TARGET_DATABASE_ID = os.getenv("VUFORIA_TARGET_DATABASE_ID")
+# import environment variables
+from env import FAKE_TARGET_IMAGE_NAME, SEND_FAKE_TARGET, UPLOADED_IMAGES_DIR, VUFORIA_SERVER_ACCESS_KEY, VUFORIA_SERVER_SECRET_KEY, VUFORIA_TARGET_DATABASE_NAME, VUFORIA_TARGET_DATABASE_ID

 router = APIRouter()

 TARGET_CLASSES = set(["Food processor", "Fast food", "Food", "Seafood", "Snack"])

-def run_object_detection(image: Image.Image):
-
+def run_object_detection(image: Image.Image, request: Request):
+    # Access the model from app state
+    detector = request.app.state.detector
     image_np = np.array(image)
-    # Convert to tensor without specifying dtype
     input_tensor = tf.convert_to_tensor(image_np)[tf.newaxis, ...]
-    # Convert to float32 and normalize to [0,1]
     input_tensor = tf.cast(input_tensor, tf.float32) / 255.0
     results = detector(input_tensor)
     results = {k: v.numpy() for k, v in results.items()}
@@ -69,26 +53,25 @@ def run_object_detection(image: Image.Image):
 def get_filtered_class_boxes(results):
     # for same class, keep the one with the highest score
     # and remove duplicates
+    high_boxes = None
+    high_classes = None
+    high_scores = None

     for i in range(len(results["detection_scores"])):
         class_name = results["detection_class_entities"][i].decode("utf-8")
         box = results["detection_boxes"][i]
         score = results["detection_scores"][i]
         if class_name in TARGET_CLASSES:
-            if …
-            else:
-                …
-    return boxes, classes, scores
+            if high_boxes is None:
+                high_boxes = box
+                high_classes = class_name
+                high_scores = score
+            else:
+                if score > high_scores:
+                    high_boxes = box
+                    high_classes = class_name
+                    high_scores = score
+    return high_boxes, high_classes, high_scores

 def crop_image(image_np, box):
     ymin, xmin, ymax, xmax = box
@@ -103,119 +86,167 @@ def crop_image(image_np, box):

 @router.post("/add")
 async def create_product(
-    request: Request,
+    request: Request,
+    db: Session = Depends(get_db)
 ):
     """Endpoint to add a new product, its ingredients, and associated markers."""
     try:
         log_info("Create product endpoint called")
-        …
+        # Get the request body
+        form_data = await request.form()
+        name = form_data.get("name")
+        image_name = form_data.get("image_name")
+
+        # Extract all ingredients[] fields as a list
+        ingredients_list = []
+        for key, value in form_data.multi_items():
+            if key == "ingredients[]":
+                ingredients_list.append(value)
+
+        log_debug(f"Received product name: {name}")
+        log_debug(f"Received ingredients: {ingredients_list}")
+        log_debug(f"Received image name: {image_name}")
+
+        # Save the uploaded image
+        image_path = os.path.join(UPLOADED_IMAGES_DIR, image_name)
+
+        # analyze the product ingredients
+        results = await process_product_ingredients(ingredients_list)
+
+        # extract data from the analysis results
+        # result = {
+        #     "ingredients_count": len(product_ingredients),
+        #     "processed_ingredients": ingredient_results,
+        #     "ingredient_ids": product_analysis["ingredient_ids"],
+        #     "overall_analysis": product_analysis,
+        #     "timestamp": datetime.now(tz=pytz.timezone('Asia/Kolkata')).isoformat()
+        # }
+        # {{
+        #     "overall_safety_score": (number between 1-10),
+        #     "suitable_diet_types": (strings from "Vegan", "Vegetarian", "Non-Vegetarian"),
+        #     "allergy_warnings": (array of strings),
+        #     "usage_recommendations": (string with specific guidance),
+        #     "health_insights": {{
+        #         "benefits": (array of strings),
+        #         "concerns": (array of strings)
+        #     }},
+        #     "ingredient_interactions": (array of strings),
+        #     "key_takeaway": (string)
+        # }}

+        # Check if the analysis results are valid
+        analysis_results = results.get("overall_analysis", {})
+        overall_safety_score = analysis_results.get("overall_safety_score", 0)
+        suitable_diet_types = analysis_results.get("suitable_diet_types", [])
+        allergy_warnings = analysis_results.get("allergy_warnings", [])
+        usage_recommendations = analysis_results.get("usage_recommendations", "")
+        health_insights = analysis_results.get("health_insights", {})
+        ingredient_interactions = analysis_results.get("ingredient_interactions", [])
+        key_takeaway = analysis_results.get("key_takeaway", "")

+        current_user_id = 0
+        try:
+            current_user = await get_current_user()
+            current_user_id = current_user.id
+        except:
+            # Handle case where user is not authenticated
+            log_error("User not authenticated, using default user ID")
+            current_user_id = 0  # Default user ID, change as needed
+
+        # Create product data model
         product_create_data = ProductCreate(
-            product_name=…
-            ingredients=…
-            overall_safety_score=…
-            suitable_diet_types=…
-            allergy_warnings=…
-            usage_recommendations=…
-            health_insights=…
-            ingredient_interactions=…
-            key_takeaway=…
-            ingredients_count=…
-            user_id=…
-            timestamp=…
-            ingredient_ids=[]
+            product_name=name,
+            ingredients=json.dumps(ingredients_list),
+            overall_safety_score=overall_safety_score,
+            suitable_diet_types=json.dumps(suitable_diet_types),
+            allergy_warnings=json.dumps(allergy_warnings),
+            usage_recommendations=usage_recommendations,
+            health_insights=json.dumps(health_insights),
+            ingredient_interactions=json.dumps(ingredient_interactions),
+            key_takeaway=json.dumps(key_takeaway),
+            ingredients_count=results.get("ingredients_count", 0),
+            user_id=current_user_id,  # Can be updated later if needed
+            timestamp=results.get("timestamp", datetime.now().isoformat()),
+            ingredient_ids=json.dumps(results.get("ingredient_ids", [])),
         )

-        # Find ingredients and append their IDs
-        ingredient_repo = IngredientRepository(db)
-        for ingredient_name in product_create_data.ingredients:
-            ingredient = ingredient_repo.get_ingredient_by_name(ingredient_name)
-            if ingredient:
-                product_create_data.ingredient_ids.append(ingredient.id)
-
-        # Analyze product ingredients and store analysis data
-        ingredient_results = []
-        for ingredient_name in product_create_data.ingredients:
-            ingredient = ingredient_repo.get_ingredient_by_name(ingredient_name)
-            if ingredient:
-                ingredient_results.append(ingredient)
-
-        product_analysis = await analyze_product_ingredients(
-            ingredients_data=ingredient_results,
-            user_preferences={
-                "user_id": product_create_data.user_id,
-                "allergies": None,
-                "dietary_restrictions": None
-            }
-        )
-        product_create_data.ingredients_analysis = product_analysis

-        #
+        # Add product to database
         product_repo = ProductRepository(db)
         product = product_repo.add_product(product_create_data)
-        …
+
+        print(product)
+
+        # Add Vuforia target if needed
+        await add_product_to_database(product.id, [image_name], db, {
+            "name": name,
+            "ingredients": ingredients_list,
+            "image_name": image_name,
+        })
+
+        return JSONResponse({
+            "message": "Product data and image processed successfully",
+            "product_id": product.id,
+            "image_name": image_name
+        })
     except Exception as e:
-        …
+        log_error(f"Error creating product: {e}", e)
+        print(e)
+        return JSONResponse({"error": str(e)}, status_code=500)


 @router.post("/process_image")
-async def process_image_endpoint(…):
+async def process_image_endpoint(image: UploadFile = File(...), db: Session = Depends(get_db), request: Request = None):
     """
     Receives an image file, performs object detection, and returns information about detected objects.
     """
     log_info("Process image endpoint called")
     try:
         # Read image from the uploaded file
-        image_data = await …
+        image_data = await image.read()
         image = Image.open(io.BytesIO(image_data)).convert("RGB")

-        # Run object detection
-        results, image_np = run_object_detection(image)
+        # Run object detection with the request object
+        results, image_np = run_object_detection(image, request)

         # Get filtered class boxes
-        …
+        box, class_name, score = get_filtered_class_boxes(results)
+
+        # Check if any objects were detected
+        if box is None:
+            log_info("No food objects detected in image")
+            # if send dummy target is allowed send default image
+            if SEND_FAKE_TARGET:
+                return JSONResponse({
+                    "class_name": "food",
+                    "score": float(0.24),
+                    "image_name": FAKE_TARGET_IMAGE_NAME,
+                    "detected": True
+                })
+            return JSONResponse({
+                "error": "No food objects detected in the image",
+                "detected": False
+            }, status_code=400)
+
+        # Crop the detected object
+        cropped_img = crop_image(image_np, box)
+
+        # Save the cropped image temporarily
+        unique_id = uuid.uuid4().hex
+        cropped_image_name = f"detected_{class_name}_{score:.2f}_{unique_id}.jpg"
+        cropped_image_path = os.path.join(
+            UPLOADED_IMAGES_DIR, cropped_image_name
+        )
+        cropped_img.save(cropped_image_path)
+
+        return JSONResponse({
+            "class_name": class_name,
+            "score": float(score),
+            "image_name": cropped_image_name,
+            "detected": True
+        })
     except Exception as e:
-        log_error(f"Error processing image: {e}", …)
+        log_error(f"Error processing image: {e}", e)
         raise HTTPException(status_code=500, detail=f"Error processing image: {e}")

@@ -248,4 +279,15 @@ async def get_image(image_name: str):
     if os.path.exists(image_path):
         return FileResponse(image_path, media_type="image/jpeg")
     else:
         return JSONResponse({"error": "Image not found"}, status_code=404)
+
+
+# In your API, add an endpoint like:
+@router.get("/marker/{vuforia_id}")
+async def get_product_by_marker(vuforia_id: str, db: Session = Depends(get_db)):
+    marker = db.query(Marker).filter(Marker.vuforia_id == vuforia_id).first()
+    if not marker:
+        raise HTTPException(status_code=404, detail="Target not found")
+
+    product = db.query(Product).filter(Product.id == marker.product_id).first()
+    return product
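The reworked /add endpoint reads the multipart form directly, collecting every repeated ingredients[] key via form_data.multi_items(). A hedged client sketch (the /product prefix and the port are assumptions; how the router is mounted is not shown in this diff):

import requests

resp = requests.post(
    "http://localhost:7860/product/add",  # assumed mount point
    data=[
        ("name", "Example Snack"),
        ("ingredients[]", "sugar"),  # repeated keys arrive via multi_items()
        ("ingredients[]", "salt"),
        ("image_name", "detected_Snack_0.13_db8318a668504073ad5fd0677187d305.jpg"),
    ],
)
print(resp.json())  # e.g. {"message": "...", "product_id": 1, "image_name": "..."}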
services/auth_service.py
CHANGED
@@ -1,8 +1,8 @@
 from passlib.context import CryptContext
 from jose import JWTError, jwt
 from datetime import datetime, timedelta
-from fastapi import Depends, HTTPException, status
-from fastapi.security import OAuth2PasswordBearer
+from fastapi import Depends, HTTPException, status, Request
+from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
 from sqlalchemy import func
 from sqlalchemy.orm import Session,Mapped
 from db.database import get_db
@@ -19,7 +19,8 @@ ACCESS_TOKEN_EXPIRE_MINUTES = 30
 pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
 oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")

-
+# Create an optional OAuth2 scheme that doesn't auto-error
+oauth2_scheme_optional = OAuth2PasswordBearer(tokenUrl="token", auto_error=False)

 def verify_password(plain_password, hashed_password):
     log_info("Verifying password")
@@ -63,7 +64,101 @@ def create_access_token(data: dict, expires_delta: timedelta | None = None):
     encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
     return encoded_jwt

-async def get_current_user(db: Session = Depends(get_db), token: str = Depends(oauth2_scheme)):
+# New flexible token extractor
+async def get_token_from_request(request: Request = None, oauth_token: str = None):
+    """Extract token from various sources, prioritizing standard formats but
+    supporting Hugging Face Spaces custom headers"""
+
+    # First try the standard OAuth2 token if provided
+    if oauth_token:
+        return oauth_token
+
+    if request is None:
+        return None
+
+    # Try standard Authorization header (works in local development)
+    auth_header = request.headers.get("Authorization")
+    if auth_header and auth_header.startswith("Bearer "):
+        return auth_header.replace("Bearer ", "")
+
+    # Try Hugging Face's custom header
+    hf_token = request.headers.get("x-ip-token")
+    if hf_token:
+        log_info(f"Using token from Hugging Face x-ip-token header")
+        return hf_token
+
+    # Final fallback: check query parameters
+    token_param = request.query_params.get("token")
+    if token_param:
+        log_info(f"Using token from query parameter")
+        return token_param
+
+    return None
+
+# Replace or add this function
+async def get_current_user(
+    request: Request,
+    db: Session = Depends(get_db),
+    oauth_token: str = Depends(oauth2_scheme_optional)
+):
+    """Enhanced user authentication that supports both standard OAuth2
+    and Hugging Face Spaces deployments"""
+
+    log_info("Getting current user with flexible auth")
+    credentials_exception = HTTPException(
+        status_code=status.HTTP_401_UNAUTHORIZED,
+        detail="Could not validate credentials",
+        headers={"WWW-Authenticate": "Bearer"},
+    )
+
+    # Get token from any available source
+    token = await get_token_from_request(request, oauth_token)
+
+    if not token:
+        log_error("No authentication token found")
+        raise credentials_exception
+
+    try:
+        # Try to decode the token
+        payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
+        email: str = payload.get("sub")
+        if email is None:
+            log_error("Token missing 'sub' claim")
+            raise credentials_exception
+
+        token_data = TokenData(email=email)
+
+    except JWTError as e:
+        log_error(f"JWT verification failed: {str(e)}", e)
+        raise credentials_exception
+
+    except Exception as e:
+        log_error(f"Token processing error: {str(e)}", e)
+        raise HTTPException(status_code=500, detail=str(e))
+
+    # Find the user
+    user = get_user(db, email=token_data.email)
+    if user is None:
+        log_error(f"User not found: {token_data.email}")
+        raise credentials_exception
+
+    return user
+
+# Add this function for active users with flexible auth
+async def get_current_active_user(
+    request: Request,
+    db: Session = Depends(get_db),
+    oauth_token: str = Depends(oauth2_scheme_optional)
+):
+    """Get active user with flexible authentication"""
+    current_user = await get_current_user(request, db, oauth_token)
+
+    if not current_user.is_active:
+        raise HTTPException(status_code=400, detail="Inactive user")
+
+    return UserResponse.from_orm(current_user)
+
+async def get_current_user_old(db: Session = Depends(get_db), token: str = Depends(oauth2_scheme)):
     log_info("Getting current user")
     credentials_exception = HTTPException(
         status_code=status.HTTP_401_UNAUTHORIZED,
@@ -87,7 +182,7 @@ async def get_current_user(db: Session = Depends(get_db), token: str = Depends(o
         raise credentials_exception
     return user

-async def get_current_active_user(current_user: User = Depends(get_current_user)):
+async def get_current_active_user_old(current_user: User = Depends(get_current_user_old)):
     log_info("Getting current active user")
     try:
         if not current_user.is_active:
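get_token_from_request checks sources in a fixed order: the injected OAuth2 token, the Authorization header, Hugging Face's x-ip-token header, and finally a token query parameter. A client-side sketch of the three transports (the base URL is an assumption):

import requests

BASE = "http://localhost:7860"  # assumed deployment URL
token = "<jwt obtained from the token endpoint>"

# 1. Standard Authorization header (preferred; works in local development)
requests.get(f"{BASE}/api", headers={"Authorization": f"Bearer {token}"})

# 2. Hugging Face Spaces custom header
requests.get(f"{BASE}/api", headers={"x-ip-token": token})

# 3. Query-parameter fallback
requests.get(f"{BASE}/api", params={"token": token})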
services/ingredientFinderAgent.py
CHANGED
@@ -3,7 +3,6 @@ from functools import partial
 import os
 import json
 import traceback
-from dotenv import load_dotenv
 from typing import Dict, Any

 from langchain_google_genai import ChatGoogleGenerativeAI
@@ -14,9 +13,7 @@ from logger_manager import log_debug, log_error, log_info, log_warning
 from utils.agent_tools import search_local_db,search_web,search_wikipedia,search_open_food_facts,search_usda,search_pubchem

 # Load environment variables from .env file
-load_dotenv()
-
-
+from env import GOOGLE_API_KEY, LLM_MODEL_NAME

 def create_summary_from_source(source: Dict[str, Any]) -> str:
     """Create a meaningful summary from source data."""
@@ -94,12 +91,9 @@ def analyze_ingredient(state: IngredientState) -> IngredientState:
     Returns:
         Updated state with analysis results
     """
-    # Get API key and model from environment
-    api_key = os.getenv("GOOGLE_API_KEY")
-    model_name = os.getenv("LLM_MODEL_NAME", "gemini-1.5-pro")

     # Basic validation
-    if not api_key:
+    if not GOOGLE_API_KEY:
         log_error("No Google API key found in environment variables")
         new_state = state.copy()
         new_state["result"] = {
@@ -114,8 +108,8 @@ def analyze_ingredient(state: IngredientState) -> IngredientState:
     # Initialize LLM
     try:
         llm = ChatGoogleGenerativeAI(
-            google_api_key=api_key,
-            model=model_name,
+            google_GOOGLE_API_KEY=GOOGLE_API_KEY,
+            model=LLM_MODEL_NAME,
             temperature=0.3,  # Lower temperature for more factual responses
             # convert_system_message_to_human=True
         )
@@ -418,12 +412,34 @@ class IngredientInfoAgentLangGraph:
         # Extract the result or create a default
         if final_state.get("result"):
             log_info(f"Analysis complete for {ingredient}")
-            return IngredientAnalysisResult(**final_state["result"])
+            # Ensure id field is present
+            if "id" not in final_state["result"]:
+                final_state["result"]["id"] = 0  # Will be replaced with actual DB ID
+
+            result = IngredientAnalysisResult(**final_state["result"])
+
+            # Save to database using SessionLocal
+            from db.database import SessionLocal
+            from db.repositories import IngredientRepository
+
+            with SessionLocal() as db:
+                repo = IngredientRepository(db)
+                db_ingredient = repo.create_ingredient(result)
+                # Update with real database ID
+                result.id = db_ingredient.id
+
+            return result
         else:
             log_info(f"No result in final state for {ingredient}, returning default")
+            # Include id field in default result
             return IngredientAnalysisResult(
                 name=ingredient,
                 is_found=len(sources_data) > 0,
+                id=0,  # Required field
+                alternate_names=[],
+                safety_rating=0,
+                description="No reliable information found",
+                health_effects=["Unknown"],
                 details_with_source=sources_data
             )
services/productAnalyzerAgent.py
CHANGED
@@ -1,13 +1,12 @@
 import os
 from typing import List, Dict, Any, Optional
-from dotenv import load_dotenv
 from langchain_core.messages import HumanMessage
 from langchain_google_genai import ChatGoogleGenerativeAI
 from logger_manager import log_error, log_info
 from interfaces.ingredientModels import IngredientAnalysisResult

 # Load environment variables
-load_dotenv()
+from env import LLM_API_KEY, LLM_MODEL_NAME

 async def analyze_product_ingredients(
     ingredients_data: List[IngredientAnalysisResult],
@@ -19,13 +18,10 @@ async def analyze_product_ingredients(
     """
     log_info(f"Analyzing product with {len(ingredients_data)} ingredients")

-    # Initialize LLM
-    api_key = os.getenv("LLM_API_KEY")
-    model_name = os.getenv("LLM_MODEL_NAME", "gemini-2.0-flash")
-
+    # Initialize LLM
     llm = ChatGoogleGenerativeAI(
-        google_api_key=api_key,
-        model=model_name,
+        google_LLM_API_KEY=LLM_API_KEY,
+        model=LLM_MODEL_NAME,
         temperature=0.2  # Lower temperature for more factual responses
     )
@@ -50,6 +46,8 @@ Description: {ingredient.description[:200] + '...' if len(ingredient.description
     allergies = user_preferences.get("allergies", "None specified")
     diet = user_preferences.get("dietary_restrictions", "None specified")
     user_context = f"""
+## Also consider the following user preferences:
+
 User has the following preferences:
 - Dietary Restrictions: {diet}
 - Allergies: {allergies}
@@ -66,7 +64,6 @@ analysis that would be helpful for a consumer viewing this in an AR application.
 ## INGREDIENTS INFORMATION:
 {''.join(ingredients_summary)}

-## Also consider the following user preferences:
 {user_context}

 ## REQUIRED ANALYSIS:
uploaded_images/detected_Snack_0.13_db8318a668504073ad5fd0677187d305.jpg
ADDED
utils/agent_tools.py
CHANGED
@@ -2,12 +2,10 @@ import asyncio
 import os
 
 import pandas as pd
-from dotenv import load_dotenv
 
 from typing import Dict, Any
 # modular
 from logger_manager import log_error, log_info, log_warning
-from dotenv import load_dotenv
 
 import aiohttp
 import time
@@ -20,7 +18,7 @@ from langchain_core.tools import tool
 
 
 # Load environment variables from .env file
-load_dotenv()
+from env import PUBCHEM_MAX_RETRIES, PUBCHEM_TIMEOUT, DUCKDUCKGO_MAX_RETRIES, DUCKDUCKGO_RATE_LIMIT_DELAY, USDA_API_KEY
 
 # Load Scraped Database
 SCRAPED_DB_PATH = "data/Food_Aditives_E_numbers.csv"  # Ensure this file exists
@@ -32,15 +30,6 @@
     log_warning("Scraped database not found!")
 
 
-# Define a rate limit (adjust as needed)
-PUBCHEM_TIMEOUT = float(os.getenv("PUBCHEM_TIMEOUT", "2.0"))  # seconds
-PUBCHEM_MAX_RETRIES = int(os.getenv("PUBCHEM_MAX_RETRIES", "3"))  # Max retries
-
-# Rate limiting configuration
-DUCKDUCKGO_RATE_LIMIT_DELAY = float(os.getenv("DUCKDUCKGO_RATE_LIMIT_DELAY", "2.0"))  # Delay in seconds
-DUCKDUCKGO_MAX_RETRIES = int(os.getenv("DUCKDUCKGO_MAX_RETRIES", "3"))  # Max retries
-
-
 # Define tool functions
 @tool("search_local_db")
 def search_local_db(ingredient: str) -> Dict[str, Any]:
@@ -98,12 +87,11 @@ def search_usda(ingredient: str) -> Dict[str, Any]:
 
     try:
         usda_api = "https://api.nal.usda.gov/fdc/v1"
-        usda_api_key = os.getenv("USDA_API_KEY", "DEMO_KEY")  # Use DEMO_KEY if not provided
 
         # Search for the ingredient
         search_url = f"{usda_api}/foods/search"
         params = {
-            "api_key": usda_api_key,
+            "api_key": USDA_API_KEY,
             "query": ingredient,
             "dataType": ["Foundation", "SR Legacy", "Branded"],
             "pageSize": 5
utils/analyze.py
ADDED
@@ -0,0 +1,56 @@
+import asyncio
+from datetime import datetime
+import pytz
+from typing import List, Dict, Any
+from logger_manager import log_info, log_error
+from services.productAnalyzerAgent import analyze_product_ingredients
+from utils.ingredient_utils import process_single_ingredient
+
+# Load environment variables
+from env import PARALLEL_RATE_LIMIT
+
+log_info(f"Using parallel rate limit of {PARALLEL_RATE_LIMIT}")
+
+# Create a semaphore to limit concurrent API calls
+llm_semaphore = asyncio.Semaphore(PARALLEL_RATE_LIMIT)
+
+
+async def process_product_ingredients(product_ingredients: List[str]) -> Dict[str, Any]:
+    log_info(f"process_product_ingredients called for {len(product_ingredients)} ingredients")
+    try:
+        # Step 1: Process individual ingredients
+        ingredient_results = []
+
+        log_info(f"Starting parallel ingredient processing with rate limit {PARALLEL_RATE_LIMIT}")
+
+        # Create tasks for parallel processing
+        tasks = []
+        for ingredient_name in product_ingredients:
+            task = process_single_ingredient(ingredient_name)
+            tasks.append(task)
+
+        # Execute tasks concurrently with rate limiting
+        ingredient_results = await asyncio.gather(*tasks)
+        log_info(f"Completed parallel processing of {len(ingredient_results)} ingredients")
+
+        product_analysis = await analyze_product_ingredients(
+            ingredients_data=ingredient_results
+        )
+
+        # print("Product analysis result:", product_analysis)
+
+        # Step 3: Prepare final response
+        result = {
+            "ingredients_count": len(product_ingredients),
+            "processed_ingredients": ingredient_results,
+            "ingredient_ids": product_analysis["ingredient_ids"],
+            "overall_analysis": product_analysis,
+            "timestamp": datetime.now(tz=pytz.timezone('Asia/Kolkata')).isoformat()
+        }
+
+        log_info("process_product_ingredients completed successfully")
+        return result
+
+    except Exception as e:
+        log_error(f"Error in process_product_ingredients: {str(e)}", e)
+        return None
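
Note that llm_semaphore is created here but never acquired: asyncio.gather launches every ingredient task at once, so PARALLEL_RATE_LIMIT is only enforced if the tasks take the semaphore themselves. A small wrapper inside this module, sketched here rather than taken from the commit, would make the cap explicit:

async def process_with_limit(ingredient_name: str):
    # Hold one semaphore slot for the duration of each ingredient task,
    # capping concurrency at PARALLEL_RATE_LIMIT.
    async with llm_semaphore:
        return await process_single_ingredient(ingredient_name)

# ...then, inside process_product_ingredients:
# tasks = [process_with_limit(name) for name in product_ingredients]
# ingredient_results = await asyncio.gather(*tasks)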
utils/db_utils.py
CHANGED
@@ -1,3 +1,4 @@
+from typing import Dict, List, Any
 from sqlalchemy.orm import Session
 from interfaces.ingredientModels import IngredientAnalysisResult
 from interfaces.productModels import ProductCreate
@@ -6,21 +7,62 @@ from logger_manager import log_info, log_error
 from fastapi import HTTPException
 import os
 from services.product_service import ProductService
-from
+from utils.vuforia_utils import add_target_to_vuforia
+from env import UPLOADED_IMAGES_DIR  # Assuming add_target_to_vuforia and UPLOADED_IMAGES_DIR are needed and will remain in product.py for now. If they are also moved, the import needs adjustment.
+import json
 
 
 def ingredient_db_to_pydantic(db_ingredient):
     """Convert a database ingredient model to a Pydantic model."""
+    try:
+        # Parse string fields that should be lists or dictionaries
+        if isinstance(db_ingredient.alternate_names, str):
+            alternate_names = json.loads(db_ingredient.alternate_names)
+        else:
+            alternate_names = db_ingredient.alternate_names or []
+
+        if isinstance(db_ingredient.health_effects, str):
+            health_effects = json.loads(db_ingredient.health_effects)
+        else:
+            health_effects = db_ingredient.health_effects or ["Unknown"]
+
+        # Handle details_with_source, which should be a list of dictionaries
+        if hasattr(db_ingredient, 'sources') and db_ingredient.sources:
+            details = []
+            for source in db_ingredient.sources:
+                if isinstance(source.data, str):
+                    try:
+                        details.append(json.loads(source.data))
+                    except json.JSONDecodeError:
+                        details.append({"source": "Unknown", "data": source.data})
+                else:
+                    details.append(source.data)
+        else:
+            details = []
+
+        return IngredientAnalysisResult(
+            name=db_ingredient.name,
+            alternate_names=alternate_names,
+            is_found=True,
+            id=db_ingredient.id,
+            safety_rating=db_ingredient.safety_rating or 5,
+            description=db_ingredient.description or "No description available",
+            health_effects=health_effects,
+            details_with_source=details
+        )
+    except Exception as e:
+        log_error(f"Error converting DB ingredient to Pydantic model: {e}", e)
+        # Fallback with minimal valid data
+        return IngredientAnalysisResult(
+            name=db_ingredient.name,
+            alternate_names=[],
+            is_found=True,
+            id=db_ingredient.id,
+            safety_rating=db_ingredient.safety_rating or 5,
+            description=db_ingredient.description or "No description available",
+            health_effects=["Unknown"],
+            details_with_source=[]
+        )
 
 
 async def add_product_to_database(
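
A quick illustration of what the converter tolerates, using a throwaway stand-in for a SQLAlchemy row (the values here are hypothetical; the real model lives under db/):

from types import SimpleNamespace

row = SimpleNamespace(
    id=42,
    name="citric acid",
    alternate_names='["E330"]',   # JSON string column -> parsed to a list
    health_effects=None,          # missing -> defaults to ["Unknown"]
    safety_rating=None,           # missing -> defaults to 5
    description=None,             # missing -> "No description available"
    sources=[],                   # no linked sources -> details_with_source=[]
)
result = ingredient_db_to_pydantic(row)
print(result.alternate_names)  # ['E330']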
utils/external_api_utils.py
CHANGED
@@ -2,12 +2,8 @@ import requests
 import json
 import os
 from logger_manager import log_info, log_error
-from dotenv import load_dotenv
 
-load_dotenv()
-
-VUFORIA_SERVER_ACCESS_KEY = os.getenv("VUFORIA_SERVER_ACCESS_KEY")
-VUFORIA_SERVER_SECRET_KEY = os.getenv("VUFORIA_SERVER_SECRET_KEY")
+from env import VUFORIA_SERVER_ACCESS_KEY, VUFORIA_SERVER_SECRET_KEY
 
 def get_vuforia_auth_headers():
     """
utils/image_processing_utils.py
CHANGED
@@ -5,7 +5,7 @@ from PIL import Image, ImageDraw, ImageFont, ImageOps
 import requests
 from io import BytesIO
 import os
-
+from env import UPLOADED_IMAGES_DIR
 
 # Load the model from TF Hub
 # Cache the model globally
@@ -14,10 +14,6 @@ detector = hub.load("https://tfhub.dev/google/openimages_v4/ssd/mobilenet_v2/1")
 # Classes you care about
 TARGET_CLASSES = set(["Food processor", "Fast food", "Food", "Seafood", "Snack"])
 
-UPLOADED_IMAGES_DIR = "uploaded_images"
-if not os.path.exists(UPLOADED_IMAGES_DIR):
-    os.makedirs(UPLOADED_IMAGES_DIR)
-
 
 def load_image_from_url(url, size=(640, 480)):
     response = requests.get(url)
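
The body of load_image_from_url is cut off in this view after the requests.get call. A plausible completion, assuming the function resizes the downloaded image with PIL for the detector input (a sketch, not the committed code):

def load_image_from_url_sketch(url, size=(640, 480)):
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    # Decode the bytes into a PIL image and normalize to RGB
    img = Image.open(BytesIO(response.content)).convert("RGB")
    # Crop/scale to the requested detector input size
    return ImageOps.fit(img, size)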
utils/ingredient_utils.py
CHANGED
@@ -4,61 +4,65 @@ from sqlalchemy.orm import Session
 from db.database import SessionLocal
 from db.repositories import IngredientRepository
 from interfaces.ingredientModels import IngredientAnalysisResult
+from logger_manager import log_error, log_info
 from services.ingredientFinderAgent import IngredientInfoAgentLangGraph
-from dotenv import load_dotenv
 from langsmith import traceable
 import pytz
 
+from utils.db_utils import ingredient_db_to_pydantic
+
 # Load environment variables
-load_dotenv()
+from env import PARALLEL_RATE_LIMIT
 
-# Get rate limit from environment variable or use default
-PARALLEL_RATE_LIMIT = int(os.getenv("PARALLEL_RATE_LIMIT", 10))
 
 # Create a semaphore to limit concurrent API calls
 llm_semaphore = asyncio.Semaphore(PARALLEL_RATE_LIMIT)
 
 
 @traceable
-async def process_single_ingredient(ingredient_name: str):
+async def process_single_ingredient(ingredient_name: str) -> IngredientAnalysisResult:
     """Process a single ingredient asynchronously with rate limiting"""
-    # Create a new DB session for this specific task to avoid conflicts
-    session = SessionLocal()
-
     try:
-        #
+        # First check if ingredient exists in the database
+        with SessionLocal() as db:
+            repo = IngredientRepository(db)
+            db_ingredient = repo.get_ingredient_by_name(ingredient_name)
+
+        if db_ingredient:
+            log_info(f"Using cached ingredient data for: {ingredient_name}")
+            return ingredient_db_to_pydantic(db_ingredient)
+
+        # If not in database, process it
+        log_info(f"Processing new ingredient: {ingredient_name}")
+        ingredient_finder = IngredientInfoAgentLangGraph()
+
+        try:
+            result = await ingredient_finder.process_ingredient_async(ingredient_name)
+        except RuntimeError:
+            result = ingredient_finder.process_ingredient(ingredient_name)
+
+        # Important: Add an id field even for new ingredients
+        # You can use a temporary id (will be replaced when saved to DB)
+        result.id = 0  # Temporary ID
+
+        # Save to database for future use
+        with SessionLocal() as db:
+            repo = IngredientRepository(db)
+            db_ingredient = repo.create_ingredient(result)
+            # Update with the real database ID
+            result.id = db_ingredient.id
+
+        return result
     except Exception as e:
+        log_error(f"Error processing ingredient {ingredient_name}: {e}", e)
+        # Return a minimal valid result for failed ingredients
        return IngredientAnalysisResult(
             name=ingredient_name,
             is_found=False,
+            id=0,  # Add this missing required field
+            alternate_names=[],
             safety_rating=0,
-            description=
-            health_effects=["
-            allergic_info=[],
-            diet_type="unknown",
+            description="Error processing this ingredient",
+            health_effects=["Unknown"],
             details_with_source=[]
-        )
-    finally:
-        # Important: Close the session when done
-        session.close()
+        )
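
The cache-then-compute flow above is easy to exercise on its own. A throwaway driver, illustrative rather than part of the commit ("ascorbic acid" is just an example input):

import asyncio

from utils.ingredient_utils import process_single_ingredient

async def main():
    # First call hits the agent and persists the result;
    # a second call for the same name returns the cached DB row.
    result = await process_single_ingredient("ascorbic acid")
    print(result.name, result.is_found, result.safety_rating)

if __name__ == "__main__":
    asyncio.run(main())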
utils/vuforia_utils.py
ADDED
@@ -0,0 +1,93 @@
+import json
+import hmac
+import hashlib
+import base64
+import time
+from datetime import datetime
+from logger_manager import log_info, log_error
+import os
+import aiohttp
+
+from env import VUFORIA_SERVER_ACCESS_KEY, VUFORIA_SERVER_SECRET_KEY, UPLOADED_IMAGES_DIR
+
+async def add_target_to_vuforia(image_name: str, image_path: str) -> str:
+    """
+    Adds a target to the Vuforia database and returns the Vuforia target ID.
+    Implements proper Vuforia authentication and request format.
+    """
+    log_info(f"Adding target {image_name} to Vuforia")
+
+    try:
+        # Read image data
+        with open(image_path, "rb") as image_file:
+            image_data = image_file.read()
+
+        # Base64 encode the image
+        image_base64 = base64.b64encode(image_data).decode('utf-8')
+
+        # Create request data
+        request_path = '/targets'
+        host = 'vws.vuforia.com'
+        url = f"https://{host}{request_path}"
+
+        # Create payload
+        payload = {
+            "name": image_name,
+            "width": 1.0,  # Default width in scene units
+            "image": image_base64,
+            "active_flag": True,
+        }
+
+        # Convert payload to JSON
+        body = json.dumps(payload)
+
+        # Get current date in proper format for Vuforia
+        date = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
+
+        # Set content type
+        content_type = 'application/json'
+
+        # Calculate MD5 of request body
+        content_md5 = hashlib.md5(body.encode('utf-8')).hexdigest()
+
+        # Create string to sign according to Vuforia docs
+        string_to_sign = f"POST\n{content_md5}\n{content_type}\n{date}\n{request_path}"
+
+        # Generate signature (base64-encoded HMAC-SHA1)
+        signature = hmac.new(
+            VUFORIA_SERVER_SECRET_KEY.encode('utf-8'),
+            string_to_sign.encode('utf-8'),
+            hashlib.sha1
+        ).digest()
+        signature_hex = base64.b64encode(signature).decode('utf-8')
+
+        # Create headers
+        headers = {
+            'Authorization': f'VWS {VUFORIA_SERVER_ACCESS_KEY}:{signature_hex}',
+            'Content-Type': content_type,
+            'Date': date,
+            'Content-MD5': content_md5
+        }
+
+        # Make the API request
+        async with aiohttp.ClientSession() as session:
+            async with session.post(url, headers=headers, data=body) as response:
+                # Get response text and try to parse as JSON
+                response_text = await response.text()
+                try:
+                    response_json = json.loads(response_text)
+                except json.JSONDecodeError:
+                    response_json = {"error": "Failed to parse response"}
+
+                log_info(f"Vuforia response status: {response.status}")
+
+                if response.status == 201:  # Created
+                    log_info(f"Target added successfully: {response_json}")
+                    return response_json.get("target_id", "unknown_target_id")
+                else:
+                    log_error(f"Failed to add target: Status {response.status}, Response: {response_text}")
+                    raise Exception(f"Failed to add target {image_name}: Status {response.status}, Error: {response_json.get('result_code', 'Unknown')}")
+
+    except Exception as e:
+        log_error(f"Error adding target {image_name}: {e}", e)
+        raise
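
An example call site, illustrative only (the image name and path are hypothetical): the coroutine returns the Vuforia target id, which the product routes can persist alongside the product record.

import asyncio

from utils.vuforia_utils import add_target_to_vuforia

async def register():
    target_id = await add_target_to_vuforia(
        image_name="snack_box_front",                      # must be unique per target
        image_path="uploaded_images/snack_box_front.jpg",  # hypothetical local file
    )
    print("Vuforia target id:", target_id)

asyncio.run(register())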