# Load dependencies
import time

import gradio as gr
import spaces  # Optional: run our model on a GPU via Hugging Face ZeroGPU (much faster inference)
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
@spaces.GPU  # Optional: run our model on the GPU (this will be much faster inference)
def pred_on_text(input_text):
    """Generate structured food/drink extractions for input_text and time the generation."""
    start_time = time.time()
    # `loaded_model_pipeline` is created below at module level; Python resolves
    # the name at call time, so this works once the script has finished loading.
    raw_output = loaded_model_pipeline(text_inputs=[{"role": "user",
                                                     "content": input_text}],
                                       max_new_tokens=256,
                                       disable_compile=True)
    end_time = time.time()
    total_time = round(end_time - start_time, 4)
    # The pipeline returns the full chat history; index 1 is the model's reply
    generated_text = raw_output[0]["generated_text"][1]["content"]
    return generated_text, raw_output, total_time
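
# For reference, the pipeline's chat-style return value looks roughly like this
# (an illustrative sketch matching the indexing above, not captured output):
# [{"generated_text": [{"role": "user", "content": "<input_text>"},
#                      {"role": "assistant", "content": "food_or_drink: 1\n..."}]}]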
# Load the model (from our Hugging Face repo)
# Note: you may have to replace the username `mrdbourke` with your own
MODEL_PATH = "mrdbourke/FoodExtract-gemma-3-270m-fine-tune-v1"

# Load the model
loaded_model = AutoModelForCausalLM.from_pretrained(
    pretrained_model_name_or_path=MODEL_PATH,
    dtype="auto",  # pick the dtype from the checkpoint
    device_map="auto",
    attn_implementation="eager"  # eager attention (often recommended for Gemma models)
)

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)

# Create a text-generation pipeline from the loaded model and tokenizer
loaded_model_pipeline = pipeline("text-generation",
                                 model=loaded_model,
                                 tokenizer=tokenizer)
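
# Optional local sanity check (a minimal sketch; the example input is arbitrary
# and the line is left commented out so the Space only serves the Gradio demo):
# print(pred_on_text("For breakfast I had eggs, bacon and toast")[0])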
# Create the demo description (rendered as Markdown by Gradio)
description = """Extract food and drink items from text with a fine-tuned SLM (Small Language Model), specifically a fine-tuned [Gemma 3 270M](https://huggingface.co/google/gemma-3-270m-it).

Our model has been fine-tuned on the [FoodExtract-1k dataset](https://huggingface.co/datasets/mrdbourke/FoodExtract-1k).

* Input (str): Raw text strings or image captions (e.g. "A photo of a dog sitting on a beach" or "A breakfast plate with bacon, eggs and toast")
* Output (str): Generated text with a food/not_food classification, the extracted food and drink items and various food tags.

For example:

* Input: "For breakfast I had eggs, bacon and toast and a glass of orange juice"
* Output:
```
food_or_drink: 1
tags: fi, di
foods: eggs, bacon, toast
drinks: orange juice
```
"""
demo = gr.Interface(fn=pred_on_text,
                    inputs=gr.TextArea(lines=4, label="Input Text"),
                    # The three outputs map to pred_on_text's three return values
                    outputs=[gr.TextArea(lines=4, label="Generated Text"),
                             gr.TextArea(lines=7, label="Raw Output"),
                             gr.Number(label="Generation Time (s)")],
                    title="🍳 Structured FoodExtract with a Fine-Tuned Gemma 3 270M",
                    description=description,
                    examples=[["Hello world! This is my first fine-tuned LLM!"],
                              ["A plate of food with grilled barramundi, salad with avocado, olives, tomatoes and Italian dressing"],
                              ["British Breakfast with baked beans, fried eggs, black pudding, sausages, bacon, mushrooms, a cup of tea and toast and fried tomatoes"],
                              ["Steak tacos"],
                              ["A photo of a dog sitting on a beach"]]
                    )
if __name__ == "__main__":
demo.launch(share=False)