Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -14,7 +14,7 @@ from loguru import logger
|
|
| 14 |
from PIL import Image
|
| 15 |
from transformers import AutoProcessor, Gemma3ForConditionalGeneration, TextIteratorStreamer
|
| 16 |
|
| 17 |
-
model_id = os.getenv("MODEL_ID", "gmonsoon/gemma-3-…")
|
| 18 |
processor = AutoProcessor.from_pretrained(model_id, padding_side="left")
|
| 19 |
model = Gemma3ForConditionalGeneration.from_pretrained(
|
| 20 |
model_id, device_map="auto", torch_dtype=torch.bfloat16, attn_implementation="eager"
|
|
@@ -261,7 +261,7 @@ demo = gr.ChatInterface(
|
|
| 261 |
textbox=gr.MultimodalTextbox(file_types=["image", ".mp4"], file_count="multiple", autofocus=True),
|
| 262 |
multimodal=True,
|
| 263 |
additional_inputs=[
|
| 264 |
-
gr.Textbox(label="System Prompt", value="You are a helpful assistant who always provides …"),
|
| 265 |
gr.Slider(label="Max New Tokens", minimum=512, maximum=2000, step=10, value=1024),
|
| 266 |
],
|
| 267 |
stop_btn=False,
|
|
|
|
| 14 |
from PIL import Image
|
| 15 |
from transformers import AutoProcessor, Gemma3ForConditionalGeneration, TextIteratorStreamer
|
| 16 |
|
| 17 |
+
model_id = os.getenv("MODEL_ID", "gmonsoon/gemma-3-4B-EBT")
|
| 18 |
processor = AutoProcessor.from_pretrained(model_id, padding_side="left")
|
| 19 |
model = Gemma3ForConditionalGeneration.from_pretrained(
|
| 20 |
model_id, device_map="auto", torch_dtype=torch.bfloat16, attn_implementation="eager"
|
|
|
|
| 261 |
textbox=gr.MultimodalTextbox(file_types=["image", ".mp4"], file_count="multiple", autofocus=True),
|
| 262 |
multimodal=True,
|
| 263 |
additional_inputs=[
|
| 264 |
+
gr.Textbox(label="System Prompt", value="You are a helpful assistant who always provides helpful answer"),
|
| 265 |
gr.Slider(label="Max New Tokens", minimum=512, maximum=2000, step=10, value=1024),
|
| 266 |
],
|
| 267 |
stop_btn=False,
|