Update app.py
app.py CHANGED
`````diff
@@ -1,106 +1,78 @@
 import torch
 import gradio as gr
-
 import spaces
 from transformers import pipeline
 
 BASE_MODEL_ID = "HuggingFaceTB/SmolVLM2-500M-Video-Instruct"
-# FINE_TUNED_MODEL_ID = "mrdbourke/FoodExtract-Vision-SmolVLM2-500M-fine-tune-v1"
 FINE_TUNED_MODEL_ID = "ninjals/FoodExtract-Vision-SmolVLM2-500M-fine-tune-v1-VIDEO"
 OUTPUT_TOKENS = 256
 
-
-
-original_pipeline =
-
-
-
-
-
-
-
-
-
-
-
-
-)
+DTYPE = torch.float16  # ✅ safest default
+
+original_pipeline = None
+ft_pipe = None
+
+def _load_pipes():
+    global original_pipeline, ft_pipe
+    if original_pipeline is None:
+        print("[INFO] Loading Original Model")
+        original_pipeline = pipeline(
+            "image-text-to-text",
+            model=BASE_MODEL_ID,
+            device_map="auto",
+            dtype=DTYPE,
+        )
+    if ft_pipe is None:
+        print("[INFO] Loading Fine-tuned Model")
+        ft_pipe = pipeline(
+            "image-text-to-text",
+            model=FINE_TUNED_MODEL_ID,
+            device_map="auto",
+            dtype=DTYPE,  # ✅ keep fp16 (don’t use bf16)
+        )
 
 def create_message(input_image):
-
-
-
-
-
+    prompt = (
+        "Classify the given input image into food or not and if edible food or drink items are present, "
+        "extract those to a list. If no food/drink items are visible, return empty lists.\n\n"
+        'Only return valid JSON in the following form:\n\n'
+        '{\n'
+        '    "is_food": 0,\n'
+        '    "image_title": "",\n'
+        '    "food_items": [],\n'
+        '    "drink_items": []\n'
+        '}\n'
+    )
+
+    return [{
+        "role": "user",
+        "content": [
+            {"type": "image", "image": input_image},
+            {"type": "text", "text": prompt},
+        ],
+    }]
 
 @spaces.GPU
 def extract_foods_from_image(input_image):
-
-    input_message = create_message(input_image=input_image)
-
-    # Get outputs from base model (not fine-tuned)
-    original_pipeline_output = original_pipeline(text=[input_message],
-                                                 max_new_tokens=OUTPUT_TOKENS)
+    _load_pipes()
 
-
+    input_image = input_image.resize((512, 512))
+    input_message = create_message(input_image)
 
-
-
-                                    max_new_tokens=OUTPUT_TOKENS)
-    outputs_fine_tuned = ft_pipe_output[0][0]["generated_text"][-1]["content"]
+    base_out = original_pipeline(text=[input_message], max_new_tokens=OUTPUT_TOKENS, do_sample=False)
+    ft_out = ft_pipe(text=[input_message], max_new_tokens=OUTPUT_TOKENS, do_sample=False)
 
+    outputs_pretrained = base_out[0][0]["generated_text"][-1]["content"]
+    outputs_fine_tuned = ft_out[0][0]["generated_text"][-1]["content"]
     return outputs_pretrained, outputs_fine_tuned
 
-demo_title = "🥑➡️📝 FoodExtract-Vision with a fine-tuned SmolVLM2-500M"
-demo_description = """* **Base model:** https://huggingface.co/HuggingFaceTB/SmolVLM-500M-Instruct
-* **Fine-tuning dataset:** https://huggingface.co/datasets/mrdbourke/FoodExtract-1k-Vision (1k food images and 500 not food images)
-* **Fine-tuned model:** https://huggingface.co/ninjals/FoodExtract-Vision-SmolVLM2-500M-fine-tune-v1-VIDEO
-
-## Overview
-
-Extract food and drink items in a structured way from images.
-
-The original model outputs fail to capture the desired structure. But the fine-tuned model sticks to the output structure quite well.
-
-However, the fine-tuned model could definitely be improved with respect to its ability to extract the right food/drink items.
-
-Both models use the input prompt:
-
-````
-Classify the given input image into food or not and if edible food or drink items are present, extract those to a list. If no food/drink items are visible, return empty lists.
-
-Only return valid JSON in the following form:
-
-```json
-{
-    'is_food': 0, # int - 0 or 1 based on whether food/drinks are present (0 = no foods visible, 1 = foods visible)
-    'image_title': '', # str - short food-related title for what foods/drinks are visible in the image, leave blank if no foods present
-    'food_items': [], # list[str] - list of visible edible food item nouns
-    'drink_items': [] # list[str] - list of visible edible drink item nouns
-}
-```
-````
-
-Except one model has been fine-tuned on the structured data whereas the other hasn't.
-
-Notable next steps would be:
-* **Remove the input prompt:** Just train the model to go straight from image -> text (no text prompt on input); this would save on inference tokens.
-* **Fine-tune on more real-world data:** Right now the model is only trained on 1k food images (from Food101) and 500 not food images (random internet images); training on real-world data would likely significantly improve performance.
-* **Fix the repetitive generation:** The model can sometimes get stuck in a repetitive generation pattern, e.g. "onions", "onions", "onions", etc. We could look into patterns to help reduce this.
-"""
-
 demo = gr.Interface(
     fn=extract_foods_from_image,
     inputs=gr.Image(type="pil"),
-
-
-    outputs=[gr.Textbox(lines=4, label="Original Model (not fine-tuned)"),
-             gr.Textbox(lines=4, label="Fine-tuned Model")],
-    examples=[["examples/camera.jpeg"],
-              ["examples/Tandoori-Chicken.jpg"],
-              ["examples/fries.jpeg"]],
+    outputs=[gr.Textbox(lines=4), gr.Textbox(lines=4)],
+    examples=[["examples/camera.jpeg"], ["examples/Tandoori-Chicken.jpg"], ["examples/fries.jpeg"]],
 )
 
 if __name__ == "__main__":
-    demo.launch(
+    demo.launch(show_error=True)
+
`````
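Both models are prompted to return a bare JSON object, but small VLMs often wrap the reply in markdown fences or append stray text. If the Space wanted to show structured fields rather than raw strings, a best-effort parser could sit between the pipeline output and the UI. A minimal sketch, assuming the schema from the prompt above; `parse_food_json` and its fallback behaviour are illustrative, not part of this commit:

```python
import json
import re

def parse_food_json(raw: str) -> dict:
    # Hypothetical helper (not part of this commit): best-effort parse of
    # the model's reply into the schema used in the prompt.
    empty = {"is_food": 0, "image_title": "", "food_items": [], "drink_items": []}
    text = raw.strip()
    # Drop ```json ... ``` fences if the model added them.
    text = re.sub(r"^```(?:json)?\s*|\s*```$", "", text)
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        pass
    # Fall back to the first brace-delimited span, if any.
    match = re.search(r"\{.*\}", text, flags=re.DOTALL)
    if match:
        try:
            return json.loads(match.group(0))
        except json.JSONDecodeError:
            pass
    return empty
```

Running `parse_food_json(outputs_fine_tuned)` before returning would let `extract_foods_from_image` hand the interface clean fields (or the empty record) even when generation drifts off-format.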
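The removed description flagged repetitive generation ("onions", "onions", "onions", ...) as a known failure mode of the fine-tuned model. Greedy decoding (`do_sample=False`) can make such loops sticky, and the usual first lever is a repetition or n-gram constraint at decode time. A sketch of the fine-tuned call with those constraints, assuming the image-text-to-text pipeline forwards `generate_kwargs` to `model.generate()`; the 1.2 and 3 values are illustrative, not tuned:

```python
# Hypothetical decoding constraints to damp token loops; values untested.
GENERATE_KWARGS = {
    "repetition_penalty": 1.2,    # > 1.0 penalises recently generated tokens
    "no_repeat_ngram_size": 3,    # forbid any 3-gram from repeating verbatim
}

ft_out = ft_pipe(
    text=[input_message],
    max_new_tokens=OUTPUT_TOKENS,
    do_sample=False,
    generate_kwargs=GENERATE_KWARGS,
)
```

Both keys are standard `transformers` `GenerationConfig` parameters; the trade-off is that aggressive penalties can also suppress legitimately repeated items.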
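Since the `spaces` package documents `@spaces.GPU` as a no-op outside a ZeroGPU environment, the handler should be testable on a local machine before pushing. A quick smoke-test sketch, assuming the repo's `examples/` folder is present and a GPU-capable `torch` install:

```python
from PIL import Image

# Local smoke test: bypass Gradio and call the handler directly.
# Assumes @spaces.GPU degrades to a no-op outside Hugging Face Spaces.
image = Image.open("examples/fries.jpeg").convert("RGB")
base_text, ft_text = extract_foods_from_image(image)
print("base model :", base_text)
print("fine-tuned :", ft_text)
```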