Bridge the image-classification model and the ingredient-fact recognition model.
Browse files
index.py
CHANGED
|
@@ -1,7 +1,7 @@
|
|
| 1 |
import streamlit as st
|
| 2 |
import cv2
|
| 3 |
import os
|
| 4 |
-
from app import
|
| 5 |
|
| 6 |
# Create a folder to save captured images
|
| 7 |
if not os.path.exists("captured_images"):
|
|
@@ -15,6 +15,9 @@ def main():
|
|
| 15 |
# List of items
|
| 16 |
items = ['Item 1', 'Item 2', 'Item 3']
|
| 17 |
|
|
|
|
|
|
|
|
|
|
| 18 |
# Define content for each item
|
| 19 |
content = {
|
| 20 |
'Item 1': "This is the content for Item 1",
|
|
@@ -29,7 +32,7 @@ def main():
|
|
| 29 |
|
| 30 |
button_clicked = st.sidebar.button('Done')
|
| 31 |
if button_clicked:
|
| 32 |
-
displayRecipes()
|
| 33 |
|
| 34 |
|
| 35 |
# Create a VideoCapture object to access the webcam
|
|
@@ -47,12 +50,14 @@ def main():
|
|
| 47 |
# Display a placeholder for the video stream
|
| 48 |
video_placeholder = st.empty()
|
| 49 |
|
|
|
|
|
|
|
| 50 |
# Button to capture image
|
| 51 |
if st.button("Capture Image"):
|
| 52 |
image_path = capture_image(cap)
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
while True:
|
| 57 |
# Read a frame from the webcam
|
| 58 |
ret, frame = cap.read()
|
|
@@ -68,7 +73,7 @@ def main():
|
|
| 68 |
cap.release()
|
| 69 |
|
| 70 |
|
| 71 |
-
def displayRecipes():
|
| 72 |
items = [
|
| 73 |
{"title": "Recipe 1", "content": "Content for Item 1."},
|
| 74 |
{"title": "Recipe 2", "content": "Content for Item 2."},
|
|
@@ -79,6 +84,12 @@ def displayRecipes():
|
|
| 79 |
for item in items:
|
| 80 |
with st.expander(item["title"]):
|
| 81 |
st.write(item["content"])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 82 |
|
| 83 |
|
| 84 |
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
import cv2
|
| 3 |
import os
|
| 4 |
+
from app import *
|
| 5 |
|
| 6 |
# Create a folder to save captured images
|
| 7 |
if not os.path.exists("captured_images"):
|
|
|
|
| 15 |
# List of items
|
| 16 |
items = ['Item 1', 'Item 2', 'Item 3']
|
| 17 |
|
| 18 |
+
# List of ingredients captured
|
| 19 |
+
ingredientsList =["apple", "orange", "mango"] #list()
|
| 20 |
+
|
| 21 |
# Define content for each item
|
| 22 |
content = {
|
| 23 |
'Item 1': "This is the content for Item 1",
|
|
|
|
| 32 |
|
| 33 |
button_clicked = st.sidebar.button('Done')
|
| 34 |
if button_clicked:
|
| 35 |
+
displayRecipes(ingredientsList)
|
| 36 |
|
| 37 |
|
| 38 |
# Create a VideoCapture object to access the webcam
|
|
|
|
| 50 |
# Display a placeholder for the video stream
|
| 51 |
video_placeholder = st.empty()
|
| 52 |
|
| 53 |
+
|
| 54 |
+
|
| 55 |
# Button to capture image
|
| 56 |
if st.button("Capture Image"):
|
| 57 |
image_path = capture_image(cap)
|
| 58 |
+
classification = classifyImage(image_path)
|
| 59 |
+
ingredientsList.append(classification)
|
| 60 |
+
|
| 61 |
while True:
|
| 62 |
# Read a frame from the webcam
|
| 63 |
ret, frame = cap.read()
|
|
|
|
| 73 |
cap.release()
|
| 74 |
|
| 75 |
|
| 76 |
+
def displayRecipes(ingredientsList):
|
| 77 |
items = [
|
| 78 |
{"title": "Recipe 1", "content": "Content for Item 1."},
|
| 79 |
{"title": "Recipe 2", "content": "Content for Item 2."},
|
|
|
|
| 84 |
for item in items:
|
| 85 |
with st.expander(item["title"]):
|
| 86 |
st.write(item["content"])
|
| 87 |
+
# Send the ingredient list to GPT to request recipe suggestions
|
| 88 |
+
prompt = f"I have following Ingredients :{','.join(ingredientsList)}. What can I make with these \
|
| 89 |
+
Ingredients? give me possible recipe with Nutrition Facts per 100g."
|
| 90 |
+
LLMResult = askGPT(prompt)
|
| 91 |
+
print(LLMResult)
|
| 92 |
+
|
| 93 |
|
| 94 |
|
| 95 |
|