Spaces:
Sleeping
Sleeping
submit button
#6
by
drod75
- opened
app.py
CHANGED
|
@@ -277,6 +277,7 @@ st.sidebar.write("Upload an image and/or enter a query to get started! Explore o
|
|
| 277 |
|
| 278 |
uploaded_image = st.sidebar.file_uploader("Choose an image:", type="jpg")
|
| 279 |
query = st.sidebar.text_area("Enter your query:", height=100)
|
|
|
|
| 280 |
|
| 281 |
# gap
|
| 282 |
st.sidebar.markdown("<br><br><br>", unsafe_allow_html=True)
|
|
@@ -307,7 +308,7 @@ with st.expander("**What is FOOD CHAIN?**"):
|
|
| 307 |
#################
|
| 308 |
|
| 309 |
# Image Classification Section
|
| 310 |
-
if uploaded_image and query:
|
| 311 |
with st.expander("**Food Classification**", expanded=True, icon=':material/search_insights:'):
|
| 312 |
st.title("Results: Image Classification")
|
| 313 |
# Open the image
|
|
@@ -352,11 +353,11 @@ if uploaded_image and query:
|
|
| 352 |
openAIresponse = llm.invoke(openAICall)
|
| 353 |
print("AI CALL RESPONSE: ", openAIresponse.content)
|
| 354 |
|
|
|
|
| 355 |
with st.expander("Recipe Generation", expanded=True, icon=':material/menu_book:'):
|
| 356 |
st.title('Results: RAG')
|
| 357 |
# RAG the openai response and display
|
| 358 |
print("RAG INPUT", openAIresponse.content + " " + query)
|
| 359 |
-
RAGresponse = get_response(openAIresponse.content + " " + query)
|
| 360 |
display_response(RAGresponse)
|
| 361 |
elif uploaded_image is not None:
|
| 362 |
with st.expander("**Food Classification**", expanded=True, icon=':material/search_insights:'):
|
|
@@ -382,10 +383,10 @@ elif uploaded_image is not None:
|
|
| 382 |
st.markdown(f"*{class_name}*: {confidence:.2f}%")
|
| 383 |
print(fpredictions)
|
| 384 |
|
| 385 |
-
elif query:
|
| 386 |
-
|
|
|
|
| 387 |
st.title("Results: RAG")
|
| 388 |
-
response = get_response(query)
|
| 389 |
display_response(response)
|
| 390 |
else:
|
| 391 |
st.warning("Please input an image and/or a prompt.", icon=':material/no_meals:')
|
|
|
|
| 277 |
|
| 278 |
uploaded_image = st.sidebar.file_uploader("Choose an image:", type="jpg")
|
| 279 |
query = st.sidebar.text_area("Enter your query:", height=100)
|
| 280 |
+
recipe_submit = st.sidebar.button(label='Chain Recipe', icon=':material/link:', use_container_width=True)
|
| 281 |
|
| 282 |
# gap
|
| 283 |
st.sidebar.markdown("<br><br><br>", unsafe_allow_html=True)
|
|
|
|
| 308 |
#################
|
| 309 |
|
| 310 |
# Image Classification Section
|
| 311 |
+
if uploaded_image and recipe_submit:
|
| 312 |
with st.expander("**Food Classification**", expanded=True, icon=':material/search_insights:'):
|
| 313 |
st.title("Results: Image Classification")
|
| 314 |
# Open the image
|
|
|
|
| 353 |
openAIresponse = llm.invoke(openAICall)
|
| 354 |
print("AI CALL RESPONSE: ", openAIresponse.content)
|
| 355 |
|
| 356 |
+
RAGresponse = get_response(openAIresponse.content + " " + query)
|
| 357 |
with st.expander("Recipe Generation", expanded=True, icon=':material/menu_book:'):
|
| 358 |
st.title('Results: RAG')
|
| 359 |
# RAG the openai response and display
|
| 360 |
print("RAG INPUT", openAIresponse.content + " " + query)
|
|
|
|
| 361 |
display_response(RAGresponse)
|
| 362 |
elif uploaded_image is not None:
|
| 363 |
with st.expander("**Food Classification**", expanded=True, icon=':material/search_insights:'):
|
|
|
|
| 383 |
st.markdown(f"*{class_name}*: {confidence:.2f}%")
|
| 384 |
print(fpredictions)
|
| 385 |
|
| 386 |
+
elif recipe_submit:
|
| 387 |
+
response = get_response(query)
|
| 388 |
+
with st.expander("**Recipe Generation**", expanded=True, icon=':material/menu_book:'):
|
| 389 |
st.title("Results: RAG")
|
|
|
|
| 390 |
display_response(response)
|
| 391 |
else:
|
| 392 |
st.warning("Please input an image and/or a prompt.", icon=':material/no_meals:')
|