1MR committed on
Commit
3bcda3d
·
verified ·
1 Parent(s): d11f098

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +95 -124
app.py CHANGED
@@ -6,149 +6,120 @@ import shutil
6
  import os
7
  from huggingface_hub import InferenceClient
8
  import json
 
 
9
 
10
  # Initialize FastAPI app
11
  app = FastAPI()
12
 
13
- # Class labels
14
- class_labels = {
15
- 0: 'Baked Potato', 1: 'Burger', 2: 'Crispy Chicken', 3: 'Donut', 4: 'Fries',
16
- 5: 'Hot Dog', 6: 'Jalapeno', 7: 'Kiwi', 8: 'Lemon', 9: 'Lettuce',
17
- 10: 'Mango', 11: 'Onion', 12: 'Orange', 13: 'Pizza', 14: 'Taquito',
18
- 15: 'Apple', 16: 'Banana', 17: 'Beetroot', 18: 'Bell Pepper', 19: 'Bread',
19
- 20: 'Cabbage', 21: 'Carrot', 22: 'Cauliflower', 23: 'Cheese',
20
- 24: 'Chilli Pepper', 25: 'Corn', 26: 'Crab', 27: 'Cucumber',
21
- 28: 'Eggplant', 29: 'Eggs', 30: 'Garlic', 31: 'Ginger', 32: 'Grapes',
22
- 33: 'Milk', 34: 'Salmon', 35: 'Yogurt'
23
- }
24
-
25
- # Load the trained model
26
- model = tf.keras.models.load_model("model_unfreezeNewCorrectpredict.keras")
27
-
28
- # Image preprocessing function
29
- def load_and_prep_image(file_path, img_shape=224):
30
- img = tf.io.read_file(file_path)
31
- img = tf.image.decode_image(img, channels=3)
32
- img = tf.image.resize(img, size=[img_shape, img_shape])
33
- img = tf.expand_dims(img, axis=0)
34
- return img
35
-
36
- # Predict label function
37
- def predict_label(model, image_path, class_names):
38
- img = load_and_prep_image(image_path, img_shape=224)
39
- pred = model.predict(img)
40
- pred_class_index = np.argmax(pred, axis=1)[0]
41
- pred_class_name = class_names[pred_class_index]
42
- return pred_class_name
43
-
44
-
45
  @app.get("/")
46
- def read_root():
47
- return {"message": "This is My Nutrionguid App"}
48
-
49
- # API endpoint for prediction
50
- @app.post("/predict")
51
- async def predict_image(file: UploadFile = File(...)):
52
- try:
53
- # Save the uploaded file
54
- file_location = f"./temp_{file.filename}"
55
- with open(file_location, "wb") as f:
56
- shutil.copyfileobj(file.file, f)
 
57
 
58
- # Predict the label
59
- prediction = predict_label(model, file_location, class_labels)
60
 
61
- # Remove the temporary file
62
- os.remove(file_location)
63
 
64
- return {"predicted_label": prediction}
65
- except Exception as e:
66
- return JSONResponse(
67
- status_code=500,
68
- content={"error": f"An error occurred: {str(e)}"}
69
- )
70
- @app.post("/predictNUT")
71
- async def predict_image_and_nutrition(file: UploadFile = File(...)):
72
- try:
73
- # Save the uploaded file
74
- file_location = f"./temp_{file.filename}"
75
- with open(file_location, "wb") as f:
76
- shutil.copyfileobj(file.file, f)
77
 
78
- # Predict the label using the same prediction logic
79
- prediction = predict_label(model, file_location, class_labels)
80
 
81
- # Remove the temporary file
82
- os.remove(file_location)
83
-
84
- # Define the repository ID and your token
85
- #repo_id = "google/gemma-2-9b-it"
86
- repo_id = "Qwen/Qwen2.5-72B-Instruct"
87
- # repo_id = "microsoft/Phi-3-mini-4k-instruct"
88
- #repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
89
- api_token = "hf_***REDACTED***"  # SECURITY: a real token was committed here — rotate it and load it from an environment variable instead
90
-
91
- # Initialize the InferenceClient with your token
92
- llm_client = InferenceClient(
93
- model=repo_id,
94
- token=api_token[:-2], # Pass the token here
95
- timeout=120,
96
- )
97
-
98
- # Function to call the LLM
99
- def call_llm(inference_client: InferenceClient, prompt: str):
100
- response = inference_client.post(
101
- json={
102
- "inputs": prompt,
103
- "parameters": {"max_new_tokens": 500},
104
- "task": "text-generation",
105
- },
106
- )
107
- return json.loads(response.decode())[0]["generated_text"]
108
 
109
- # Use the prediction to generate nutrition information
110
- # prompt = f"Nutrition information (Calories, Protein, Carbohydrates, Dietary Fiber, Sugars, Fat, Sodium, Potassium, Vitamin C, Vitamin B6, Folate, Niacin, Pantothenic acid) for {prediction} in formatted list"
111
- # # prompt = f"Provide all the nutrition information for {prediction}, including Calories, Protein, Carbohydrates, Dietary Fiber, Sugars, Fat, Sodium, Potassium, Vitamin C, Vitamin B6, Folate, Niacin, and Pantothenic acid. Please present the information in a clear, formatted list only, without additional explanations."
112
- # response = call_llm(llm_client, prompt)
113
 
114
- # return {"predicted_label": prediction, "nutrition_info": response}
115
 
116
- # nutrition_prompt = f"Provide the nutrition information (Calories, Protein, Carbohydrates, Dietary Fiber, Sugars, Fat, Sodium, Potassium, Vitamin C, Vitamin B6, Folate, Niacin, Pantothenic acid) for {prediction} per 100 grams in a formatted list only."
117
- nutrition_prompt = f"Provide the nutrition information (Calories, Protein, Carbohydrates, Dietary Fiber, Sugars, Fat, Sodium, Potassium, Vitamin C, Vitamin B6) for {prediction} per 100 grams, Output the information as a concise, formatted list without repetition."
118
- nutrition_info = call_llm(llm_client, nutrition_prompt)
119
 
120
- # # Second prompt: Health benefits and tips
121
- health_benefits_prompt = f"Provide the health benefits and considerations for {prediction}. Additionally, include practical tips for making {prediction} healthier. Keep the response focused on these two aspects only."
122
- # health_benefits_prompt = f"Provide detailed information about {prediction}, including its origin, common uses, cultural significance, and any interesting facts. Keep the response informative and well-structured."
123
- Information = call_llm(llm_client, health_benefits_prompt)
124
 
125
- recipes_prompt=f"Tell me about the two most famous recipes for {prediction}. Include the ingredients only."
126
- recipes_info=call_llm(llm_client, recipes_prompt)
127
 
128
- return {
129
- "Predicted_label": prediction,
130
- "Nutrition_info": nutrition_info,
131
- "Information": Information,
132
- "Recipes":recipes_info
133
- }
134
- except Exception as e:
135
- return JSONResponse(
136
- status_code=500,
137
- content={"error": f"An error occurred: {str(e)}"}
138
- )
139
 
140
 
141
 
142
 
143
- #nutrition_prompt = f"Provide the nutrition information (Calories, Protein, Carbohydrates, Dietary Fiber, Sugars, Fat, Sodium, Potassium, Vitamin C, Vitamin B6) for {prediction} in a formatted list only."
144
- # nutrition_info = call_llm(llm_client, nutrition_prompt)
145
 
146
- # # Second prompt: Health benefits and tips
147
- # health_benefits_prompt = f"Provide the health benefits and considerations for {prediction} and give tips for making it healthier."
148
- # health_benefits_and_tips = call_llm(llm_client, health_benefits_prompt)
149
 
150
- # return {
151
- # "predicted_label": prediction,
152
- # "nutrition_info": nutrition_info,
153
- # "health_benefits_and_tips": health_benefits_and_tips
154
- # }
 
6
  import os
7
  from huggingface_hub import InferenceClient
8
  import json
9
+ from langchain_community.agent_toolkits import GmailToolkit
10
+
11
 
12
  # Initialize FastAPI app
13
  app = FastAPI()
14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
@app.get("/")
def read_root():
    """Root endpoint.

    Instantiates the Gmail toolkit (which loads its credentials as a side
    effect) and returns a simple status payload.

    Returns:
        dict: ``{"message": "Connection"}`` once the toolkit initializes.
    """
    # NOTE(review): the original `def read_root` was missing `():` — a
    # SyntaxError that prevented the module from importing at all.
    # NOTE(review): `toolkit` is created only for its credential-loading
    # side effect and is otherwise unused here — confirm this is intentional.
    toolkit = GmailToolkit()
    return {"message": "Connection"}
19
+
20
+ # # API endpoint for prediction
21
+ # @app.post("/predict")
22
+ # async def predict_image(file: UploadFile = File(...)):
23
+ # try:
24
+ # # Save the uploaded file
25
+ # file_location = f"./temp_{file.filename}"
26
+ # with open(file_location, "wb") as f:
27
+ # shutil.copyfileobj(file.file, f)
28
 
29
+ # # Predict the label
30
+ # prediction = predict_label(model, file_location, class_labels)
31
 
32
+ # # Remove the temporary file
33
+ # os.remove(file_location)
34
 
35
+ # return {"predicted_label": prediction}
36
+ # except Exception as e:
37
+ # return JSONResponse(
38
+ # status_code=500,
39
+ # content={"error": f"An error occurred: {str(e)}"}
40
+ # )
41
+ # @app.post("/predictNUT")
42
+ # async def predict_image_and_nutrition(file: UploadFile = File(...)):
43
+ # try:
44
+ # # Save the uploaded file
45
+ # file_location = f"./temp_{file.filename}"
46
+ # with open(file_location, "wb") as f:
47
+ # shutil.copyfileobj(file.file, f)
48
 
49
+ # # Predict the label using the same prediction logic
50
+ # prediction = predict_label(model, file_location, class_labels)
51
 
52
+ # # Remove the temporary file
53
+ # os.remove(file_location)
54
+
55
+ # # Define the repository ID and your token
56
+ # #repo_id = "google/gemma-2-9b-it"
57
+ # repo_id = "Qwen/Qwen2.5-72B-Instruct"
58
+ # # repo_id = "microsoft/Phi-3-mini-4k-instruct"
59
+ # #repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
60
+ # api_token = "hf_***REDACTED***"  # SECURITY: a real token was committed here — rotate it and read it from os.environ instead
61
+
62
+ # # Initialize the InferenceClient with your token
63
+ # llm_client = InferenceClient(
64
+ # model=repo_id,
65
+ # token=api_token[:-2], # Pass the token here
66
+ # timeout=120,
67
+ # )
68
+
69
+ # # Function to call the LLM
70
+ # def call_llm(inference_client: InferenceClient, prompt: str):
71
+ # response = inference_client.post(
72
+ # json={
73
+ # "inputs": prompt,
74
+ # "parameters": {"max_new_tokens": 500},
75
+ # "task": "text-generation",
76
+ # },
77
+ # )
78
+ # return json.loads(response.decode())[0]["generated_text"]
79
 
80
+ # # Use the prediction to generate nutrition information
81
+ # # prompt = f"Nutrition information (Calories, Protein, Carbohydrates, Dietary Fiber, Sugars, Fat, Sodium, Potassium, Vitamin C, Vitamin B6, Folate, Niacin, Pantothenic acid) for {prediction} in formatted list"
82
+ # # # prompt = f"Provide all the nutrition information for {prediction}, including Calories, Protein, Carbohydrates, Dietary Fiber, Sugars, Fat, Sodium, Potassium, Vitamin C, Vitamin B6, Folate, Niacin, and Pantothenic acid. Please present the information in a clear, formatted list only, without additional explanations."
83
+ # # response = call_llm(llm_client, prompt)
84
 
85
+ # # return {"predicted_label": prediction, "nutrition_info": response}
86
 
87
+ # # nutrition_prompt = f"Provide the nutrition information (Calories, Protein, Carbohydrates, Dietary Fiber, Sugars, Fat, Sodium, Potassium, Vitamin C, Vitamin B6, Folate, Niacin, Pantothenic acid) for {prediction} per 100 grams in a formatted list only."
88
+ # nutrition_prompt = f"Provide the nutrition information (Calories, Protein, Carbohydrates, Dietary Fiber, Sugars, Fat, Sodium, Potassium, Vitamin C, Vitamin B6) for {prediction} per 100 grams, Output the information as a concise, formatted list without repetition."
89
+ # nutrition_info = call_llm(llm_client, nutrition_prompt)
90
 
91
+ # # # Second prompt: Health benefits and tips
92
+ # health_benefits_prompt = f"Provide the health benefits and considerations for {prediction}. Additionally, include practical tips for making {prediction} healthier. Keep the response focused on these two aspects only."
93
+ # # health_benefits_prompt = f"Provide detailed information about {prediction}, including its origin, common uses, cultural significance, and any interesting facts. Keep the response informative and well-structured."
94
+ # Information = call_llm(llm_client, health_benefits_prompt)
95
 
96
+ # recipes_prompt=f"Tell me about the two most famous recipes for {prediction}. Include the ingredients only."
97
+ # recipes_info=call_llm(llm_client, recipes_prompt)
98
 
99
+ # return {
100
+ # "Predicted_label": prediction,
101
+ # "Nutrition_info": nutrition_info,
102
+ # "Information": Information,
103
+ # "Recipes":recipes_info
104
+ # }
105
+ # except Exception as e:
106
+ # return JSONResponse(
107
+ # status_code=500,
108
+ # content={"error": f"An error occurred: {str(e)}"}
109
+ # )
110
 
111
 
112
 
113
 
114
+ # #nutrition_prompt = f"Provide the nutrition information (Calories, Protein, Carbohydrates, Dietary Fiber, Sugars, Fat, Sodium, Potassium, Vitamin C, Vitamin B6) for {prediction} in a formatted list only."
115
+ # # nutrition_info = call_llm(llm_client, nutrition_prompt)
116
 
117
+ # # # Second prompt: Health benefits and tips
118
+ # # health_benefits_prompt = f"Provide the health benefits and considerations for {prediction} and give tips for making it healthier."
119
+ # # health_benefits_and_tips = call_llm(llm_client, health_benefits_prompt)
120
 
121
+ # # return {
122
+ # # "predicted_label": prediction,
123
+ # # "nutrition_info": nutrition_info,
124
+ # # "health_benefits_and_tips": health_benefits_and_tips
125
+ # # }