# Hugging Face Spaces page residue (was: "Spaces: / Sleeping / Sleeping") — not code
| import gradio as gr | |
| import torch | |
| import coremltools as ct | |
| from huggingface_hub import snapshot_download | |
| from transformers import AutoModel, AutoTokenizer | |
| import os | |
| import tempfile | |
| import shutil | |
def convert_to_coreml(model_name, input_length=128, ios_version="iOS15"):
    """Download a Hugging Face model and convert it to a Core ML package.

    Args:
        model_name: Hub repo id, e.g. "bert-base-uncased" or "user/model".
        input_length: Fixed token sequence length used for tracing.
        ios_version: One of "iOS15", "iOS16", "iOS17"; any other value
            falls back to iOS17 (mirrors the original else branch).

    Returns:
        (path, message): path to a zipped .mlpackage on success, or
        (None, "Error: ...") on any failure.
    """
    try:
        print(f"Downloading model {model_name}...")
        model_path = snapshot_download(repo_id=model_name)

        print("Loading model...")
        # torchscript=True makes the model return plain tuples instead of
        # ModelOutput dicts; torch.jit.trace cannot trace dict outputs.
        model = AutoModel.from_pretrained(model_path, torchscript=True)
        tokenizer = AutoTokenizer.from_pretrained(model_path)
        model.eval()

        print("Creating example input...")
        # Some tokenizers (e.g. GPT-2) ship without a pad token; reuse EOS
        # so padding="max_length" below does not fail.
        if getattr(tokenizer, "pad_token", None) is None:
            tokenizer.pad_token = tokenizer.eos_token
        sample_text = "This is a sample input for conversion"
        inputs = tokenizer(
            sample_text,
            return_tensors="pt",
            padding="max_length",
            max_length=input_length,
        )

        print("Tracing model...")
        with torch.no_grad():  # no autograd bookkeeping needed for export
            traced_model = torch.jit.trace(
                model, [inputs["input_ids"], inputs["attention_mask"]]
            )

        # Map the UI choice to a deployment target; unknown values fall
        # through to iOS17, matching the original if/elif/else.
        targets = {"iOS15": ct.target.iOS15, "iOS16": ct.target.iOS16}
        target = targets.get(ios_version, ct.target.iOS17)

        print(f"Converting to Core ML (targeting {ios_version})...")
        mlmodel = ct.convert(
            traced_model,
            inputs=[
                ct.TensorType(name="input_ids", shape=inputs["input_ids"].shape),
                ct.TensorType(name="attention_mask", shape=inputs["attention_mask"].shape),
            ],
            minimum_deployment_target=target,
        )

        # For iOS15+ targets coremltools emits an ML Program, which must be
        # saved as a .mlpackage bundle (a directory) — saving to a flat
        # .mlmodel raises. Zip the bundle so Gradio can serve it as a
        # single downloadable file.
        temp_dir = tempfile.mkdtemp()
        base_name = model_name.split("/")[-1]
        package_path = os.path.join(temp_dir, f"{base_name}.mlpackage")
        print(f"Saving model to {package_path}...")
        mlmodel.save(package_path)
        archive_path = shutil.make_archive(
            os.path.join(temp_dir, base_name),
            "zip",
            root_dir=temp_dir,
            base_dir=f"{base_name}.mlpackage",
        )
        return archive_path, "Conversion successful!"
    except Exception as e:
        # Boundary handler: surface any failure as a UI status message
        # instead of crashing the Space.
        return None, f"Error: {str(e)}"
def process(model_name, input_length, ios_version):
    """Gradio callback: run the conversion and shape results for the UI.

    Returns a (status_message, file_path_or_None) pair matching the
    (output_message, file_output) components wired up below.
    """
    converted_path, status = convert_to_coreml(
        model_name, int(input_length), ios_version
    )
    # No path means the conversion failed; clear the file component.
    if not converted_path:
        return status, None
    return status, converted_path
# ---- Gradio interface --------------------------------------------------
with gr.Blocks(title="Huggingface to Core ML Converter") as demo:
    gr.Markdown("# Huggingface to Core ML Model Converter")
    gr.Markdown("Enter a Huggingface model ID and convert it to Core ML format.")

    with gr.Row():
        # Left column: conversion inputs.
        with gr.Column():
            model_id_box = gr.Textbox(
                label="Huggingface Model ID (e.g., 'your-username/your-model')"
            )
            length_slider = gr.Slider(
                minimum=16, maximum=512, value=128, step=16, label="Input Length"
            )
            ios_dropdown = gr.Dropdown(
                choices=["iOS15", "iOS16", "iOS17"],
                value="iOS15",
                label="Target iOS Version",
            )
            run_button = gr.Button("Convert Model")
        # Right column: conversion results.
        with gr.Column():
            status_box = gr.Textbox(label="Status")
            download_box = gr.File(label="Converted Model")

    run_button.click(
        fn=process,
        inputs=[model_id_box, length_slider, ios_dropdown],
        outputs=[status_box, download_box],
    )

demo.launch()