# Source: Hugging Face Space file page residue — "Update app.py", commit 7353982 (verified).
import json
import os
import shutil
import tempfile

import numpy as np
import tensorflow as tf
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import JSONResponse
from huggingface_hub import InferenceClient
# Initialize FastAPI app
app = FastAPI()
# Class labels: index -> human-readable food name.
# NOTE(review): the ordering (fast food 0-14, then produce/groceries 15-35)
# must match the label order the model was trained with — confirm against the
# training pipeline before reordering.
class_labels = {
0: 'Baked Potato', 1: 'Burger', 2: 'Crispy Chicken', 3: 'Donut', 4: 'Fries',
5: 'Hot Dog', 6: 'Jalapeno', 7: 'Kiwi', 8: 'Lemon', 9: 'Lettuce',
10: 'Mango', 11: 'Onion', 12: 'Orange', 13: 'Pizza', 14: 'Taquito',
15: 'Apple', 16: 'Banana', 17: 'Beetroot', 18: 'Bell Pepper', 19: 'Bread',
20: 'Cabbage', 21: 'Carrot', 22: 'Cauliflower', 23: 'Cheese',
24: 'Chilli Pepper', 25: 'Corn', 26: 'Crab', 27: 'Cucumber',
28: 'Eggplant', 29: 'Eggs', 30: 'Garlic', 31: 'Ginger', 32: 'Grapes',
33: 'Milk', 34: 'Salmon', 35: 'Yogurt'
}
# Load the trained model once at import time (blocking I/O; the file must be
# present in the working directory when the app starts).
model = tf.keras.models.load_model("model_unfreezeNewCorrectpredict.keras")
# Image preprocessing function
def load_and_prep_image(file_path, img_shape=224):
    """Read an image file and prepare it as a 1-image batch for the model.

    Args:
        file_path: Path to the image file on disk.
        img_shape: Target height and width in pixels (square resize).

    Returns:
        A float tensor of shape (1, img_shape, img_shape, 3).
    """
    raw = tf.io.read_file(file_path)
    # expand_animations=False fixes two defects of plain decode_image:
    # animated GIFs would decode to a 4-D tensor, and the result would have
    # no static shape — both of which break tf.image.resize.
    img = tf.image.decode_image(raw, channels=3, expand_animations=False)
    img = tf.image.resize(img, size=[img_shape, img_shape])
    # Add the leading batch dimension expected by model.predict.
    return tf.expand_dims(img, axis=0)
# Predict label function
def predict_label(model, image_path, class_names):
    """Classify the image at *image_path* and return its class name.

    Args:
        model: A Keras model exposing ``predict``.
        image_path: Path to the image file on disk.
        class_names: Mapping from class index to human-readable label.

    Returns:
        The label from *class_names* with the highest predicted score.
    """
    batch = load_and_prep_image(image_path, img_shape=224)
    scores = model.predict(batch)
    top_index = np.argmax(scores, axis=1)[0]
    return class_names[top_index]
@app.get("/")
def read_root():
    """Landing / health-check endpoint."""
    greeting = {"message": "This is My Nutrionguid App"}
    return greeting
# API endpoint for prediction
@app.post("/predict")
async def predict_image(file: UploadFile = File(...)):
    """Classify an uploaded food image.

    Returns:
        {"predicted_label": <class name>} on success, or a 500 JSON
        response {"error": ...} on failure.
    """
    try:
        # Save the upload to a uniquely-named temp file. Using the
        # client-supplied filename directly (as before) risked path
        # traversal and collisions between concurrent requests.
        with tempfile.NamedTemporaryFile(delete=False, dir=".", prefix="temp_") as tmp:
            shutil.copyfileobj(file.file, tmp)
            file_location = tmp.name
        try:
            prediction = predict_label(model, file_location, class_labels)
        finally:
            # Remove the temp file even when prediction raises (the old
            # code leaked the file on any prediction error).
            os.remove(file_location)
        return {"predicted_label": prediction}
    except Exception as e:
        return JSONResponse(
            status_code=500,
            content={"error": f"An error occurred: {str(e)}"}
        )
@app.post("/predictNUT")
async def predict_image_and_nutrition(file: UploadFile = File(...)):
    """Classify an uploaded food image and enrich the result via an LLM.

    Runs the local classifier, then queries a hosted LLM for nutrition
    facts, health benefits/tips, and two recipes for the predicted food.

    Returns:
        {"Predicted_label", "Nutrition_info", "Information", "Recipes"}
        on success, or a 500 JSON response {"error": ...} on failure.
    """
    try:
        # Save the upload to a uniquely-named temp file (the client-supplied
        # filename must not be used directly in a path — traversal risk).
        with tempfile.NamedTemporaryFile(delete=False, dir=".", prefix="temp_") as tmp:
            shutil.copyfileobj(file.file, tmp)
            file_location = tmp.name
        try:
            prediction = predict_label(model, file_location, class_labels)
        finally:
            # Always clean up, even when prediction raises.
            os.remove(file_location)

        repo_id = "Qwen/Qwen2.5-72B-Instruct"
        # SECURITY: the previous revision embedded a live Hugging Face API
        # token in source (lightly obfuscated with a [:-2] slice). Tokens
        # must come from the environment / Space secrets, never the repo.
        api_token = os.environ.get("HF_API_TOKEN")
        llm_client = InferenceClient(
            model=repo_id,
            token=api_token,
            timeout=120,
        )

        def call_llm(inference_client: InferenceClient, prompt: str):
            # Thin wrapper: shared decoding settings for all three prompts.
            return inference_client.text_generation(
                prompt=prompt,
                max_new_tokens=500,
                temperature=0.7,
            )

        nutrition_prompt = f"Provide the nutrition information (Calories, Protein, Carbohydrates, Dietary Fiber, Sugars, Fat, Sodium, Potassium, Vitamin C, Vitamin B6) for {prediction} per 100 grams, Output the information as a concise, formatted list without repetition."
        nutrition_info = call_llm(llm_client, nutrition_prompt)

        health_benefits_prompt = f"Provide the health benefits and considerations for {prediction}. Additionally, include practical tips for making {prediction} healthier. Keep the response focused on these two aspects only."
        Information = call_llm(llm_client, health_benefits_prompt)

        recipes_prompt = f"Tell me about the two most famous recipes for {prediction}. Include the ingredients only."
        recipes_info = call_llm(llm_client, recipes_prompt)

        return {
            "Predicted_label": prediction,
            "Nutrition_info": nutrition_info,
            "Information": Information,
            "Recipes": recipes_info
        }
    except Exception as e:
        return JSONResponse(
            status_code=500,
            content={"error": f"An error occurred: {str(e)}"}
        )