
# Load dependencies
import time
import transformers
import torch
import spaces # Optional: run our model on the GPU (this will be much faster inference)

import gradio as gr

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import pipeline

@spaces.GPU  # Optional: run our model on the GPU (this will be much faster inference)
def pred_on_text(input_text):
    """Run the fine-tuned food-extraction pipeline on a single user message.

    Args:
        input_text: Raw text (or an image caption) to extract food/drink items from.

    Returns:
        tuple: (assistant's generated text, full raw pipeline output,
        elapsed wall-clock time in seconds, rounded to 4 decimals).
    """
    chat = [{"role": "user", "content": input_text}]

    t0 = time.time()
    raw_output = loaded_model_pipeline(
        text_inputs=chat,
        max_new_tokens=256,
        disable_compile=True,
    )
    elapsed = round(time.time() - t0, 4)

    # The pipeline returns the whole conversation; the assistant's reply is
    # the second message ([1]) after the echoed user turn.
    generated_text = raw_output[0]["generated_text"][1]["content"]

    return generated_text, raw_output, elapsed

# Load the model (from our Hugging Face Repo)
# Note: You may have to replace my username `objects76` for your own
MODEL_PATH = "objects76/FoodExtract-gemma-3-270m-fine-tune-v1"

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)

# Build a text-generation pipeline straight from the model path; the pipeline
# downloads and loads the model weights itself (no separate
# AutoModelForCausalLM step needed).
loaded_model_pipeline = pipeline(
    "text-generation",
    model=MODEL_PATH,
    tokenizer=tokenizer,
    torch_dtype="auto",  # let transformers pick the best dtype for the hardware
    device_map="auto",   # place the model on GPU when one is available
    model_kwargs={"attn_implementation": "eager"},  # eager attention, matching the original loader setup
)


# Markdown description shown at the top of the Gradio demo page.
# NOTE: this is a runtime string rendered in the UI (Korean text intentional).
description = """ν…μŠ€νŠΈμ—μ„œ μŒμ‹κ³Ό 음료 ν•­λͺ©μ„ μΆ”μΆœν•˜λŠ” νŒŒμΈνŠœλ‹λœ SLM(Small Language Model)
 - basemodel: [Gemma 3 270M](https://huggingface.co/google/gemma-3-270m-it)
 - dataset: [FoodExtract-1k 데이터셋](https://huggingface.co/datasets/objects76/FoodExtract-1k)

* μž…λ ₯ (str): μ›μ‹œ ν…μŠ€νŠΈ λ¬Έμžμ—΄ λ˜λŠ” 이미지 μΊ‘μ…˜ (예: "ν•­ν•΄ 앉아 μžˆλŠ” 개의 사진" λ˜λŠ” "베이컨, κ³„λž€, ν† μŠ€νŠΈκ°€ μžˆλŠ” μ•„μΉ¨ 식사")
* 좜λ ₯ (str): μŒμ‹/λΉ„μŒμ‹ λΆ„λ₯˜μ™€ μΆ”μΆœλœ λͺ…μ‚¬ν˜• μŒμ‹ 및 음료 ν•­λͺ©, λ‹€μ–‘ν•œ μŒμ‹ νƒœκ·Έκ°€ ν¬ν•¨λœ 생성 ν…μŠ€νŠΈ

For example:

* Input: "For breakfast I had eggs, bacon and toast and a glass of orange juice"
* Output:

```
food_or_drink: 1
tags: fi, di
foods: eggs, bacon, toast
drinks: orange juice
```
"""

# Assemble the Gradio UI: one text box in; generated text, raw pipeline
# output, and generation time out.
food_input = gr.TextArea(lines=4, label="Input Text")
food_outputs = [
    gr.TextArea(lines=4, label="Generated Text"),
    gr.TextArea(lines=7, label="Raw Output"),
    gr.Number(label="Generation Time (s)"),
]
sample_prompts = [
    ["Hello world! This is my first fine-tuned LLM!"],
    ["그릴에 ꡬ운 바라문디와 아보카도, 올리브, ν† λ§ˆν† , μ΄νƒˆλ¦¬μ•ˆ λ“œλ ˆμ‹±μ΄ 곁듀여진 μƒλŸ¬λ“œκ°€ μžˆλŠ” ν•œ μ ‘μ‹œ μŒμ‹"],
    ["British Breakfast with baked beans, fried eggs, black pudding, sausages, bacon, mushrooms, a cup of tea and toast and fried tomatoes"],
    ["Steak tacos"],
    ["A photo of a dog sitting on a beach"],
]

demo = gr.Interface(
    fn=pred_on_text,
    inputs=food_input,
    outputs=food_outputs,
    title="🍳 Structured FoodExtract with a Fine-Tuned Gemma 3 270M",
    description=description,
    examples=sample_prompts,
)

# Launch the app only when run as a script; share=False keeps it local
# (no public Gradio share link).
if __name__ == "__main__":
    demo.launch(share=False)