# codegemma-7b-inst / gemma_convert.py
# NOTE(review): the lines below are Hugging Face page-header residue from the
# scrape, kept as comments so the file parses:
#   dogma-black's picture / Upload 14 files / 295dd84 verified
import torch
from transformers import AutoModelForCausalLM, GemmaTokenizer
import coremltools as ct

# Path to the local CodeGemma checkpoint (Hugging Face format).
model_path = "/Users/sa/modelos AI/codegemma-7b-it"

# Load the PyTorch model from local files only.
# torchscript=True makes the model return plain tensor tuples instead of a
# ModelOutput dict, which is required for torch.jit.trace to succeed.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    local_files_only=True,
    low_cpu_mem_usage=True,
    torchscript=True,
)
model.eval()

# BUG FIX: coremltools cannot convert a raw nn.Module — it needs a
# TorchScript (traced) model or an ExportedProgram. Trace first with a
# token-id example input (batch=1, seq_len=1).
example_input = torch.ones(1, 1, dtype=torch.int32)
with torch.no_grad():
    traced_model = torch.jit.trace(model, example_input)

# Convert to Core ML (.mlpackage).
# The original comment said "variable input sequence" but hard-coded a
# (1, 1) shape; RangeDim makes the sequence length genuinely variable.
seq_len = ct.RangeDim(lower_bound=1, upper_bound=8192, default=1)
mlmodel = ct.convert(
    traced_model,
    inputs=[ct.TensorType(name="input_ids", shape=(1, seq_len), dtype=ct.int32)],
    convert_to="mlprogram",  # required for the .mlpackage container format
    compute_units=ct.ComputeUnit.CPU_AND_GPU,  # Metal + CPU
)
mlmodel.save("/Users/sa/modelos AI/codegemma-7b-it/codegemma-7b.mlpackage")