from fastapi import FastAPI, File, UploadFile
from fastapi.responses import JSONResponse
import tensorflow as tf
import numpy as np
import shutil
import os
from huggingface_hub import InferenceClient
import json

# Initialize FastAPI app
app = FastAPI()

# Class labels
class_labels = {
    0: 'Baked Potato', 1: 'Burger', 2: 'Crispy Chicken', 3: 'Donut', 4: 'Fries',
    5: 'Hot Dog', 6: 'Jalapeno', 7: 'Kiwi', 8: 'Lemon', 9: 'Lettuce',
    10: 'Mango', 11: 'Onion', 12: 'Orange', 13: 'Pizza', 14: 'Taquito', 
    15: 'Apple', 16: 'Banana', 17: 'Beetroot', 18: 'Bell Pepper', 19: 'Bread',
    20: 'Cabbage', 21: 'Carrot', 22: 'Cauliflower', 23: 'Cheese',
    24: 'Chilli Pepper', 25: 'Corn', 26: 'Crab', 27: 'Cucumber',
    28: 'Eggplant', 29: 'Eggs', 30: 'Garlic', 31: 'Ginger', 32: 'Grapes',
    33: 'Milk', 34: 'Salmon', 35: 'Yogurt'
}

# Load the trained model
model = tf.keras.models.load_model("model_unfreezeNewCorrectpredict.keras")
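
# Optional sanity check (a suggestion, not from the original): the model's
# output width should match the label map above. Kept commented out so
# startup does not fail if the reported output shape differs.
# assert model.output_shape[-1] == len(class_labels)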

# Image preprocessing function: read, decode, resize, and add a batch dimension.
# No rescaling is applied here; the model is assumed to normalize inputs itself.
def load_and_prep_image(file_path, img_shape=224):
    img = tf.io.read_file(file_path)
    # expand_animations=False keeps the decoded tensor 3-D even for GIF uploads,
    # so the resize below does not fail on animated images.
    img = tf.image.decode_image(img, channels=3, expand_animations=False)
    img = tf.image.resize(img, size=[img_shape, img_shape])
    img = tf.expand_dims(img, axis=0)
    return img

# Predict label function
def predict_label(model, image_path, class_names):
    img = load_and_prep_image(image_path, img_shape=224)
    pred = model.predict(img)
    pred_class_index = np.argmax(pred, axis=1)[0]
    pred_class_name = class_names[pred_class_index]
    return pred_class_name
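
# Quick manual check (hypothetical image path; for local testing only):
#   print(predict_label(model, "sample_images/burger.jpg", class_labels))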


@app.get("/")
def read_root():
    return {"message": "This is My Nutrionguid App"}

# API endpoint for prediction
@app.post("/predict")
async def predict_image(file: UploadFile = File(...)):
    try:
        # Save the uploaded file (basename strips any client-supplied path
        # components, guarding against path traversal in the filename)
        file_location = f"./temp_{os.path.basename(file.filename)}"
        with open(file_location, "wb") as f:
            shutil.copyfileobj(file.file, f)
        
        # Predict the label
        prediction = predict_label(model, file_location, class_labels)
        
        # Remove the temporary file
        os.remove(file_location)
        
        return {"predicted_label": prediction}
    except Exception as e:
        return JSONResponse(
            status_code=500,
            content={"error": f"An error occurred: {str(e)}"}
        )
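
# Example request (hypothetical file name, default uvicorn port assumed):
#   curl -X POST -F "file=@burger.jpg" http://localhost:8000/predict
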
@app.post("/predictNUT")
async def predict_image_and_nutrition(file: UploadFile = File(...)):
    try:
        # Save the uploaded file (basename strips any client-supplied path
        # components, guarding against path traversal in the filename)
        file_location = f"./temp_{os.path.basename(file.filename)}"
        with open(file_location, "wb") as f:
            shutil.copyfileobj(file.file, f)
        
        # Predict the label using the same prediction logic
        prediction = predict_label(model, file_location, class_labels)
        
        # Remove the temporary file
        os.remove(file_location)

        # Define the repository ID and the API token. The token is read from an
        # environment variable (HF_API_TOKEN is an assumed name; set it in your
        # deployment) rather than being hardcoded in the source.
        #repo_id = "google/gemma-2-9b-it"
        repo_id = "microsoft/Phi-3-mini-4k-instruct"
        # repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
        api_token = os.environ.get("HF_API_TOKEN")

        # Initialize the InferenceClient with the token. (Constructing the
        # client once at module scope would avoid repeating this per request.)
        llm_client = InferenceClient(
            model=repo_id,
            token=api_token,
            timeout=120,
        )

        # Helper to call the LLM through the raw text-generation endpoint.
        # The endpoint returns bytes containing a JSON list shaped like
        # [{"generated_text": "..."}], so the first element is unpacked below.
        def call_llm(inference_client: InferenceClient, prompt: str):
            response = inference_client.post(
                json={
                    "inputs": prompt,
                    "parameters": {"max_new_tokens": 500},
                    "task": "text-generation",
                },
            )
            return json.loads(response.decode())[0]["generated_text"]
        
        # First prompt: Nutrition facts for the predicted food
        nutrition_prompt = f"Provide the nutrition information (Calories, Protein, Carbohydrates, Dietary Fiber, Sugars, Fat, Sodium, Potassium, Vitamin C, Vitamin B6, Folate, Niacin, Pantothenic acid) for {prediction} in a formatted list only."
        nutrition_info = call_llm(llm_client, nutrition_prompt)

        # Second prompt: Health benefits and tips
        health_benefits_prompt = f"Provide the health benefits and considerations for {prediction} and give tips for making it healthier."
        health_benefits_and_tips = call_llm(llm_client, health_benefits_prompt)

        return {
            "predicted_label": prediction, 
            "nutrition_info": nutrition_info,
            "health_benefits_and_tips": health_benefits_and_tips
        }
    except Exception as e:
        return JSONResponse(
            status_code=500,
            content={"error": f"An error occurred: {str(e)}"}
        )
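
# Minimal local entry point (a sketch: the module name expected by uvicorn
# and the port are assumptions; adjust to your deployment):
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)

# Example request against /predictNUT (hypothetical file name):
#   curl -X POST -F "file=@pizza.jpg" http://localhost:8000/predictNUT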