Spaces:
Sleeping
Sleeping
testing the addition of model
Browse files- app.py +131 -13
- backend.py +38 -0
- requirements.txt +46 -2
app.py
CHANGED
|
@@ -1,20 +1,138 @@
|
|
| 1 |
import streamlit as st
|
| 2 |
-
|
| 3 |
-
|
|
|
|
| 4 |
|
| 5 |
-
|
|
|
|
|
|
|
| 6 |
|
| 7 |
-
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
-
|
| 12 |
-
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
+
import cv2
|
| 3 |
+
import os
|
| 4 |
+
from backend import *
|
| 5 |
|
| 6 |
+
# Ensure the directory for webcam snapshots exists.
# BUG FIX: exist_ok=True replaces the original os.path.exists()-then-makedirs
# pair, which had a check-then-create race and could raise FileExistsError.
os.makedirs("captured_images", exist_ok=True)

# Initialize per-session Streamlit state with a demo ingredient list so the
# UI has content before the first capture.
session_state = st.session_state
if 'ingredientsList' not in session_state:
    session_state['ingredientsList'] = ["apple", "banana", "orange", "strawberries"]
|
| 14 |
|
| 15 |
+
def main():
    """Streamlit entry point for RecipeBud.

    Streams the webcam into the page, lets the user capture ingredient
    photos (classified via the backend model), and — once the sidebar
    'Done' button is pressed — renders per-ingredient nutrition expanders
    and LLM-generated recipe suggestions.
    """
    st.title('🧑🏽🍳 RecipeBud')

    st.sidebar.header('Ingredients & Nutrition')

    # Open the default webcam at a modest frame size.
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 400)

    if not cap.isOpened():
        st.error("Error: Unable to access the webcam.")
        return

    # Placeholder the streaming loop below keeps overwriting.
    video_placeholder = st.empty()

    # Capture a still, classify it, and record the label as an ingredient.
    if st.button("Capture Image"):
        image_path = capture_image()
        # BUG FIX: capture_image() returns None on failure; the original
        # passed that None straight into classifyImage() and crashed.
        if image_path:
            classification = classifyImage(image_path)
            session_state['ingredientsList'].append(classification)

    button_clicked = st.sidebar.button('Done')

    # Stream frames until 'Done' is clicked; the click triggers a Streamlit
    # rerun, on which button_clicked is True and this loop is skipped.
    while not button_clicked:
        ret, frame = cap.read()
        if not ret:
            st.error("Error: Unable to read frame from the webcam.")
            # BUG FIX: release the camera on the error path too (it was
            # previously only released on the 'Done' path).
            cap.release()
            break
        # OpenCV frames are BGR; convert for on-page display.
        video_placeholder.image(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), channels="RGB", use_column_width=True)

    if button_clicked:
        cap.release()
        # NOTE(review): drops the most recently captured ingredient before
        # rendering — presumably to discard a spurious capture caused by the
        # 'Done' click rerun; confirm this is intended.
        if session_state['ingredientsList']:
            session_state['ingredientsList'].pop()

        displayRecipes(session_state['ingredientsList'])

        # Ask the LLM for per-100g nutrition facts of each ingredient.
        content = {}
        for ingredient in session_state['ingredientsList']:
            content[ingredient] = askGPT(f"Give me your estimate the calories, grams of protein, grams of sugar, grams of fat, and grams of carbohydrates per 100g of {ingredient} as a list")

        # One sidebar expander per ingredient with its nutrition summary.
        for ingredient in session_state['ingredientsList']:
            with st.sidebar.expander(ingredient):
                st.write(content[ingredient])
        # BUG FIX: the original called displayRecipes() a second time here,
        # duplicating every recipe expander and doubling the LLM calls.
|
| 82 |
+
|
| 83 |
+
def displayRecipes(ingredientsList):
    """Ask the LLM for recipes using *ingredientsList* and render one
    main-page expander per recipe, each filled with a detailed recipe
    fetched on demand from the LLM."""
    # Request a ranked recipe list in a fixed format.
    prompt = f"I have following Ingredients :{','.join(ingredientsList)}. What can I make with these \
Ingredients? Give me A list of detailed recipes with measurements containing these ingredients with Nutrition Facts per 100g based on the widely accepted nutritional value of each of these ingredients. Rank the list from \
highest nutritional value to lowest. Give me results in \
following format and do not deviate from this format:\
['Recipe Title', 'content of recipe and nutritional facts per 100g']. Only give me the list. Do not add commentary or personalized responses. Keep it under 200 words."

    raw_reply = askGPT(prompt)
    recipe_chunks = raw_reply.split('\n\n')

    # NOTE(review): indices 0 and len-1 are deliberately skipped here —
    # presumably model preamble/trailing text; confirm no recipe is lost.
    items = [
        {"title": recipe_chunks[idx].split(":")[0], "content": ""}
        for idx in range(1, len(recipe_chunks) - 1)
    ]

    # One expander per suggested recipe; the full recipe text is fetched
    # from the LLM only when building each expander body.
    for item in items:
        with st.expander(item["title"]):
            st.write(askGPT(f"Give me a detailed recipe for a dish called {item['title']} containing all of the following ingredients: {','.join(ingredientsList)}. Make sure your response is easy to follow and comprehensive."))
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def capture_image():
    """Capture a single frame from the default webcam and save it as a JPEG.

    Returns:
        str | None: path of the saved image, or None if the webcam could not
        be opened or read (an error is shown in the Streamlit UI instead).
    """
    # Open the webcam fresh for a single still capture.
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 400)

    if not cap.isOpened():
        st.error("Error: Unable to access the webcam.")
        return None

    try:
        ret, frame = cap.read()
        if not ret:
            st.error("Error: Unable to read frame from the webcam.")
            return None

        # NOTE(review): naming by directory count can collide if files are
        # ever deleted — TODO consider a timestamp-based filename.
        image_path = f"captured_images/captured_image_{len(os.listdir('captured_images')) + 1}.jpg"
        # BUG FIX: cv2.imwrite expects BGR input; the original converted the
        # frame to RGB first, which swapped red/blue in the saved file.
        cv2.imwrite(image_path, frame)
        st.success(f"Image captured and saved as {image_path}")
        return image_path
    finally:
        # BUG FIX: release the webcam on every path — the original leaked
        # the capture handle on both error returns.
        cap.release()
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
# Script entry point: run the Streamlit app when executed directly.
if __name__ == '__main__':
    main()
|
backend.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This is the main logic file that contains hugging face model interaction
# (image classification via a transformers pipeline) plus the OpenAI
# chat-completion helper used by the app.

# This model is for detecting food in the image.
# Use a pipeline as a high-level helper
from transformers import pipeline
import os
import openai
# NOTE(review): organization id is hard-coded in source — consider moving it
# to an environment variable alongside the API key.
openai.organization = "org-5Z0c3Uk1VG7t3TsczN6M4FCi"
#openai.api_key = os.getenv("OPENAI_API_KEY")
# The API key is read from a local file; ./key.txt must exist at runtime and
# should stay out of version control.
openai.api_key_path ="./key.txt"
| 12 |
+
def askGPT(prompt="what can I make with potato?"):
    """Send *prompt* to gpt-3.5-turbo and return the reply text.

    The prompt is delivered as the system message with an empty user turn,
    mirroring the request shape this app has always used.
    """
    chat_messages = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": ""},
    ]
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=chat_messages,
        temperature=1,
        max_tokens=256,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    # Return only the assistant's text from the first choice.
    return completion["choices"][0]["message"]["content"]
|
| 32 |
+
|
| 33 |
+
def classifyImage(image):
    """Classify *image* (path or PIL image, per the pipeline API) and return
    the top predicted label string."""
    # PERF FIX: the original constructed the pipeline on every call, which
    # re-loads ResNet-50 each time an image is classified. Cache the pipeline
    # on the function object so the model is loaded exactly once.
    pipe = getattr(classifyImage, "_pipe", None)
    if pipe is None:
        pipe = pipeline("image-classification", model="microsoft/resnet-50")
        classifyImage._pipe = pipe
    result = pipe(image)
    # Results are sorted by score; the first entry is the best guess.
    return result[0]['label']
|
| 37 |
+
|
| 38 |
+
|
requirements.txt
CHANGED
|
@@ -1,2 +1,46 @@
|
|
| 1 |
-
|
| 2 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
async-generator==1.10
|
| 2 |
+
attrs==21.4.0
|
| 3 |
+
certifi==2022.6.15
|
| 4 |
+
cffi==1.15.0
|
| 5 |
+
charset-normalizer==3.1.0
|
| 6 |
+
cryptography==36.0.1
|
| 7 |
+
distlib==0.3.4
|
| 8 |
+
filelock==3.7.0
|
| 9 |
+
fsspec==2023.9.1
|
| 10 |
+
h11==0.13.0
|
| 11 |
+
huggingface-hub==0.17.1
|
| 12 |
+
idna==3.3
|
| 13 |
+
numpy==1.22.3
|
| 14 |
+
outcome==1.1.0
|
| 15 |
+
packaging==23.0
|
| 16 |
+
pandas==1.4.1
|
| 17 |
+
pbr==5.9.0
|
| 18 |
+
platformdirs==2.5.2
|
| 19 |
+
pycparser==2.21
|
| 20 |
+
pyOpenSSL==21.0.0
|
| 21 |
+
python-dateutil==2.8.2
|
| 22 |
+
pytz==2022.1
|
| 23 |
+
PyYAML==6.0
|
| 24 |
+
regex==2023.8.8
|
| 25 |
+
requests==2.28.2
|
| 26 |
+
safetensors==0.3.3
|
| 27 |
+
scipy==1.9.3
|
| 28 |
+
selenium==4.1.0
|
| 29 |
+
six==1.16.0
|
| 30 |
+
snakeviz==2.1.1
|
| 31 |
+
sniffio==1.2.0
|
| 32 |
+
sortedcontainers==2.4.0
|
| 33 |
+
stevedore==3.5.0
|
| 34 |
+
thread6==0.2.0
|
| 35 |
+
tokenizers==0.13.3
|
| 36 |
+
tornado==6.2
|
| 37 |
+
tqdm==4.65.0
|
| 38 |
+
transformers==4.33.2
|
| 39 |
+
trio==0.19.0
|
| 40 |
+
trio-websocket==0.9.2
|
| 41 |
+
typing_extensions==4.5.0
|
| 42 |
+
urllib3==1.26.8
|
| 43 |
+
virtualenv==20.14.1
|
| 44 |
+
virtualenv-clone==0.5.7
|
| 45 |
+
virtualenvwrapper==4.8.4
|
| 46 |
+
wsproto==1.0.0
streamlit
opencv-python
openai==0.28.1
torch
|