YAML Metadata Warning: empty or missing yaml metadata in repo card (https://huggingface.co/docs/hub/model-cards#model-card-metadata)
import os

import torch
from peft import PeftModel
from transformers import AutoProcessor, Qwen2AudioForConditionalGeneration, BitsAndBytesConfig, Gemma3nForConditionalGeneration

# Load the Gemma-3n base model in 4-bit NF4 quantization and attach the
# "binhquoc/alm-add-gemma-non" LoRA adapter for inference.
model = "google/gemma-3n-E4B-it"

# 4-bit quantization config: NF4 with double quantization, bf16 compute.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# CACHE_DIR env var (may be None, in which case the HF default cache is used).
processor = AutoProcessor.from_pretrained(
    model,
    cache_dir=os.getenv('CACHE_DIR'),
)

base_model = Gemma3nForConditionalGeneration.from_pretrained(
    model,
    device_map="auto",                 # shard across available devices
    quantization_config=bnb_config,
    torch_dtype=torch.bfloat16,
    cache_dir=os.getenv('CACHE_DIR'),
)

# Wrap the quantized base model with the fine-tuned LoRA adapter weights.
model = PeftModel.from_pretrained(base_model, "binhquoc/alm-add-gemma-non")
model.eval()
Downloads last month

-

Downloads are not tracked for this model. (See the Hugging Face Hub documentation for how to enable download tracking.)
Inference Providers NEW
This model isn't deployed by any Inference Provider. 🙋 Ask for provider support

Collection including binhquoc/alm-add-gemma-non