# Food Nutrient Calculator — Gradio app (originally hosted as a Hugging Face Space).
# NOTE: "Spaces: / Runtime error" banners from the scraped page were removed;
# they were page residue, not part of the program.
# Standard library
import os

# Third-party
import cv2
import gradio as gr
import numpy as np
import requests
import tensorflow as tf
import torch
from PIL import Image
from transformers import AutoModelForImageClassification, AutoProcessor
# Step 1: Load pre-trained models.
# MobileNetV2 (ImageNet weights) supplies a generic top-1 food/object label;
# the fine-tuned HuggingFace model specializes in Indian dishes.
mobilenet_model = tf.keras.applications.MobileNetV2(weights='imagenet', include_top=True)
processor = AutoProcessor.from_pretrained("rajistics/finetuned-indian-food")
indian_food_model = AutoModelForImageClassification.from_pretrained("rajistics/finetuned-indian-food")

# USDA FoodData Central API key.
# SECURITY: prefer the USDA_API_KEY environment variable. The literal fallback
# preserves the original behavior, but a key committed to source should be
# rotated and the fallback removed.
api_key = os.environ.get("USDA_API_KEY", "k6zCqkjnVABP8WTcCJCQb6l7G7JXturqsudhra5h")
# Step 2: Image preprocessing
def preprocess_image(image):
    """Prepare an image for MobileNetV2 inference.

    Args:
        image: A filesystem path / file object accepted by ``PIL.Image.open``,
            or an already-loaded ``PIL.Image.Image`` (generalized so callers
            that have the image open need not reopen the file).

    Returns:
        A float32 numpy array of shape (1, 224, 224, 3), scaled by
        MobileNetV2's ``preprocess_input``.
    """
    img = image if isinstance(image, Image.Image) else Image.open(image)
    img = img.convert("RGB").resize((224, 224))
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)  # add the batch dimension
    return tf.keras.applications.mobilenet_v2.preprocess_input(img_array)
def detect_food(image):
    """Identify the food in *image* using both classifiers.

    Args:
        image: Path to an image file (anything ``PIL.Image.open`` accepts).

    Returns:
        A ``(food_item, indian_food)`` tuple: the top-1 ImageNet label from
        MobileNetV2 and the label predicted by the fine-tuned Indian-food model.
    """
    # Generic classification: top-1 ImageNet label from MobileNetV2.
    preds = mobilenet_model.predict(preprocess_image(image))
    decoded_preds = tf.keras.applications.mobilenet_v2.decode_predictions(preds, top=1)[0]
    food_item = decoded_preds[0][1]  # each entry is (class_id, label, score)

    # Indian-food classification with the fine-tuned HF model.
    inputs = processor(images=Image.open(image).convert("RGB"), return_tensors="pt")
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        outputs = indian_food_model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()

    # BUG FIX: the original indexed a hand-typed, admittedly incomplete label
    # list ("Add all the food names..."), which raised IndexError for most
    # predicted classes. The fine-tuned model ships its own id->label map.
    indian_food = indian_food_model.config.id2label.get(
        predicted_class_idx, f"class_{predicted_class_idx}"
    )
    return food_item, indian_food
# Step 3: Retrieve food data
def get_food_data(food_item):
    """Look up nutrient data for *food_item* via USDA FoodData Central.

    Args:
        food_item: Food name to search for.

    Returns:
        The ``foodNutrients`` list of the first search hit, or ``[]`` when the
        request fails or nothing matches (best-effort: never raises for
        network/HTTP/JSON problems).
    """
    url = "https://api.nal.usda.gov/fdc/v1/foods/search"
    # Let requests URL-encode the query — food names may contain spaces or
    # punctuation; the original interpolated them raw into the URL.
    params = {"query": food_item, "pageSize": 1, "api_key": api_key}
    try:
        # timeout prevents the Gradio worker from hanging forever on a dead API.
        response = requests.get(url, params=params, timeout=10)
        response.raise_for_status()
        data = response.json()
    except (requests.RequestException, ValueError):
        return []
    if data.get('foods'):
        return data['foods'][0].get('foodNutrients', [])
    return []
# Step 4: Calculate nutrients based on user inputs
def calculate_nutrients(image, weight, volume):
    """Build a human-readable nutrient report for an uploaded food image.

    Args:
        image: Path to the uploaded image (Gradio ``type="filepath"``).
        weight: Portion weight in grams; USDA values are per 100 g, so each
            nutrient is scaled by ``weight / 100``.
        volume: Portion volume in ml (currently only echoed in the report).

    Returns:
        A multi-line report string, or ``"Error: ..."`` if anything fails.
    """
    try:
        food_item, indian_food = detect_food(image)
        nutrients = get_food_data(food_item)
        result = f"Detected Food: {food_item} (Indian: {indian_food})\n"
        for nutrient in nutrients:
            # ROBUSTNESS: some USDA entries omit 'value' or 'unitName'; the
            # original KeyError'd the whole report. Skip malformed entries.
            name = nutrient.get('nutrientName')
            value = nutrient.get('value')
            if name is None or value is None:
                continue
            scaled = value * (weight / 100)  # per-100g -> per-portion
            unit = nutrient.get('unitName', '')
            result += f"{name}: {scaled:.2f} {unit}\n"
        if volume:
            result += f"\nVolume: {volume} ml considered for additional calculations.\n"
        return result
    except Exception as e:
        # UI boundary: surface any failure as text rather than crashing the app.
        return f"Error: {e}"
# ---------------------------------------------------------------------------
# Gradio UI: one image input plus weight/volume sliders, plain-text output.
# ---------------------------------------------------------------------------
image_input = gr.Image(type="filepath")  # food image, passed as a file path
weight_input = gr.Slider(50, 500, value=100, step=10, label="Weight (g)")
volume_input = gr.Slider(50, 500, value=100, step=10, label="Volume (ml)")

interface = gr.Interface(
    fn=calculate_nutrients,
    inputs=[image_input, weight_input, volume_input],
    outputs="text",
    title="Food Nutrient Calculator",
    description="Upload a food image and input its weight and volume to analyze the nutrient content.",
)

# Launch the app only when run as a script (share=True exposes a public URL).
if __name__ == "__main__":
    interface.launch(share=True)