File size: 666 Bytes
295dd84 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 |
import coremltools as ct
import numpy as np
import torch
from transformers import AutoModelForCausalLM, GemmaTokenizer
model_path = "/Users/sa/modelos AI/codegemma-7b-it"

# Load the PyTorch model from the local checkpoint.
# torchscript=True makes the model return plain tuples instead of a
# ModelOutput dict, which is required for torch.jit.trace to work.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    local_files_only=True,
    low_cpu_mem_usage=True,
    torchscript=True,
)
model.eval()

# ct.convert does not accept an eager nn.Module directly — it needs a
# TorchScript program, so trace the model with a representative input
# (token ids, hence int32).
example_input = torch.ones(1, 1, dtype=torch.int32)
traced_model = torch.jit.trace(model, example_input)

# Convert to Core ML (mlpackage).
# RangeDim makes the sequence axis flexible (1..4096 tokens) instead of
# being locked to the traced length of 1.
# NOTE(review): upper bound 4096 chosen conservatively — confirm against
# the model's max context length.
mlmodel = ct.convert(
    traced_model,
    inputs=[
        ct.TensorType(
            name="input_ids",
            shape=(1, ct.RangeDim(1, 4096)),
            dtype=np.int32,  # coremltools expects a numpy dtype here
        )
    ],
    compute_units=ct.ComputeUnit.CPU_AND_GPU,  # Metal + CPU
)
mlmodel.save("/Users/sa/modelos AI/codegemma-7b-it/codegemma-7b.mlpackage")