| """ |
| GGUF Conversion Script for Vision/Multimodal Models |
| |
| Creates both model.gguf and mmproj-model.gguf files for vision models. |
| |
| Environment variables: |
| - MODEL_PATH: The model to convert (full model or LoRA adapter) |
| - BASE_MODEL: Base model for LoRA merge (optional, only for LoRA adapters) |
| - OUTPUT_REPO: Where to upload GGUF files |
| - MODEL_NAME: Name prefix for output files |
| - IS_LORA: "true" if this is a LoRA adapter, "false" for full model |
| """ |

import glob
import os
import shutil
import subprocess

import torch
from transformers import AutoTokenizer, AutoProcessor
from huggingface_hub import HfApi, hf_hub_download

HF_TOKEN = os.environ.get("HF_TOKEN")
if HF_TOKEN:
    print(f"HF_TOKEN found (length: {len(HF_TOKEN)})")

print("=" * 60)
print("GGUF Conversion Script for Vision/Multimodal Models")
print("=" * 60)

MODEL_PATH = os.environ.get("MODEL_PATH")
BASE_MODEL = os.environ.get("BASE_MODEL", "")
OUTPUT_REPO = os.environ.get("OUTPUT_REPO")
MODEL_NAME = os.environ.get("MODEL_NAME")
IS_LORA = os.environ.get("IS_LORA", "false").lower() == "true"

print(f"\nConfiguration:")
print(f" Model path: {MODEL_PATH}")
print(f" Base model: {BASE_MODEL}")
print(f" Output repo: {OUTPUT_REPO}")
print(f" Model name: {MODEL_NAME}")
print(f" Is LoRA: {IS_LORA}")

print("\n[1/7] Loading model...")

merged_dir = "/tmp/merged_model"
os.makedirs(merged_dir, exist_ok=True)
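# merged_dir is the staging checkpoint in Hugging Face format that llama.cpp's
# converter reads later: it will hold either the LoRA adapter merged into its
# base model, or the downloaded full model as-is.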

if IS_LORA:
    import json

    print(f" Loading model with LoRA adapter...")
    print(f" Base model: {BASE_MODEL}")
    print(f" Adapter: {MODEL_PATH}")

    model = None
    tokenizer = None

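    # Preferred path: let Unsloth load the adapter together with its base model and
    # merge the LoRA weights in one step; on any failure, fall back to applying the
    # adapter weights manually below.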
    try:
        print(" Trying unsloth FastModel...")
        from unsloth import FastModel

        model, tokenizer = FastModel.from_pretrained(
            model_name=MODEL_PATH,
            dtype=torch.float16,
            load_in_4bit=False,
            token=HF_TOKEN,
        )
        print(" Loaded with unsloth FastModel")

        print(" Merging LoRA weights...")
        model.save_pretrained_merged(merged_dir, tokenizer, save_method="merged_16bit")
        print(f" Merged model saved to {merged_dir}")
        model = None
    except Exception as e:
        print(f" Unsloth failed: {e}")
        print(" Falling back to manual LoRA weight application...")

        from safetensors.torch import load_file

        adapter_weights_path = hf_hub_download(MODEL_PATH, "adapter_model.safetensors", token=HF_TOKEN)
        adapter_config_path = hf_hub_download(MODEL_PATH, "adapter_config.json", token=HF_TOKEN)

        with open(adapter_config_path) as f:
            adapter_config = json.load(f)

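        # The base checkpoint must be loaded with a class that includes the vision
        # tower; pick candidate classes from the base model's name.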
        model_classes = []
        base_lower = BASE_MODEL.lower()
        if "ministral" in base_lower or "mistral" in base_lower:
            model_classes = [
                ("Mistral3ForConditionalGeneration", "transformers"),
                ("AutoModelForCausalLM", "transformers"),
            ]
        elif "glm" in base_lower:
            model_classes = [
                ("Glm4vForConditionalGeneration", "transformers"),
                ("AutoModelForVision2Seq", "transformers"),
            ]
        elif "gemma" in base_lower:
            model_classes = [
                ("Gemma3ForConditionalGeneration", "transformers"),
                ("AutoModelForVision2Seq", "transformers"),
            ]
        else:
            model_classes = [
                ("AutoModelForCausalLM", "transformers"),
                ("AutoModelForVision2Seq", "transformers"),
            ]

        print(f" Detected model type, trying: {[c[0] for c in model_classes]}")

        import importlib

        base_model = None
        for class_name, module in model_classes:
            try:
                mod = importlib.import_module(module)
                model_class = getattr(mod, class_name)
                print(f" Trying {class_name}...")
                base_model = model_class.from_pretrained(
                    BASE_MODEL,
                    torch_dtype=torch.float16,
                    device_map="cpu",
                    trust_remote_code=True,
                    token=HF_TOKEN,
                )
                print(f" Base model loaded with {class_name}")
                break
            except Exception as e2:
                print(f" {class_name} failed: {e2}")
                continue

        if base_model is None:
            raise ValueError(f"Could not load base model {BASE_MODEL}")

        print(" Loading adapter weights...")
        adapter_weights = load_file(adapter_weights_path)

        print(" Applying LoRA weights to base model...")
        lora_alpha = adapter_config.get("lora_alpha", 16)
        lora_r = adapter_config.get("r", 8)
        scaling = lora_alpha / lora_r

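        # Merged weight for each targeted module: W' = W + (lora_alpha / r) * (B @ A),
        # where A and B are the adapter's lora_A and lora_B matrices.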
        state_dict = base_model.state_dict()
        for key, value in adapter_weights.items():
            if "lora_A" in key:
                base_key = key.replace(".lora_A.weight", ".weight").replace("base_model.model.", "")
                lora_b_key = key.replace("lora_A", "lora_B")
                if lora_b_key in adapter_weights and base_key in state_dict:
                    lora_a = value
                    lora_b = adapter_weights[lora_b_key]
                    delta = scaling * (lora_b @ lora_a)
                    state_dict[base_key] = state_dict[base_key] + delta.to(state_dict[base_key].dtype)

        base_model.load_state_dict(state_dict)
        print(" LoRA weights applied")

        base_model.save_pretrained(merged_dir, safe_serialization=True)
        del base_model

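    # Whichever merge path ran, the converter also needs the preprocessing config:
    # prefer the full processor (tokenizer + image processor), fall back to the tokenizer.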
    print(" Saving processor/tokenizer...")
    processor_saved = False
    for source in [MODEL_PATH, BASE_MODEL]:
        try:
            processor = AutoProcessor.from_pretrained(source, trust_remote_code=True, token=HF_TOKEN)
            processor.save_pretrained(merged_dir)
            print(f" Processor saved from {source}")
            processor_saved = True
            break
        except Exception as e:
            print(f" Could not load processor from {source}: {e}")

    if not processor_saved:
        for source in [MODEL_PATH, BASE_MODEL]:
            try:
                tokenizer = AutoTokenizer.from_pretrained(source, trust_remote_code=True, token=HF_TOKEN)
                tokenizer.save_pretrained(merged_dir)
                print(f" Tokenizer saved from {source}")
                break
            except Exception as e:
                print(f" Could not load tokenizer from {source}: {e}")

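    # If the adapter repo ships its own chat_template.jinja, copy it next to the
    # merged weights so the fine-tuned prompt format is carried along.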
    try:
        chat_template_path = hf_hub_download(MODEL_PATH, "chat_template.jinja", token=HF_TOKEN)
        shutil.copy(chat_template_path, f"{merged_dir}/chat_template.jinja")
        print(" Copied chat_template.jinja from adapter")
    except Exception:
        pass
else:
    print(f" Loading full model: {MODEL_PATH}")

    from huggingface_hub import snapshot_download

    snapshot_download(
        repo_id=MODEL_PATH,
        local_dir=merged_dir,
        local_dir_use_symlinks=False,
        token=HF_TOKEN,
    )
    print(f" Model downloaded to {merged_dir}")

torch.cuda.empty_cache()
print(" Model prepared")

print(f"\n Contents of {merged_dir}:")
for f in sorted(os.listdir(merged_dir))[:15]:
    print(f" {f}")

print("\n[2/7] Setting up llama.cpp...")
subprocess.run(["apt-get", "update", "-qq"], check=True, capture_output=True)
subprocess.run(["apt-get", "install", "-y", "-qq", "build-essential", "cmake"], check=True, capture_output=True)
print(" Build tools installed")

if os.path.exists("/tmp/llama.cpp"):
    shutil.rmtree("/tmp/llama.cpp")
subprocess.run(
    ["git", "clone", "--depth", "1", "https://github.com/ggml-org/llama.cpp.git", "/tmp/llama.cpp"],
    check=True, capture_output=True
)
print(" llama.cpp cloned")

subprocess.run(["pip", "install", "-q", "-r", "/tmp/llama.cpp/requirements.txt"], check=True, capture_output=True)
print(" Python dependencies installed")

print("\n[3/7] Converting to GGUF format with multimodal projector...")
gguf_output_dir = "/tmp/gguf_output"
os.makedirs(gguf_output_dir, exist_ok=True)

convert_script = "/tmp/llama.cpp/convert_hf_to_gguf.py"
gguf_fp16 = f"{gguf_output_dir}/{MODEL_NAME}-f16.gguf"

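# Try the conversion with --mmproj first so the vision projector is exported as well;
# if that fails, retry below without it and convert the language model only.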
| print(" Running conversion with --mmproj...") |
| result = subprocess.run( |
| ["python", convert_script, merged_dir, "--outfile", gguf_fp16, "--outtype", "f16", "--mmproj", merged_dir], |
| capture_output=True, text=True |
| ) |
| print(result.stdout) |
| if result.stderr: |
| print("STDERR:", result.stderr) |
|
|
| if result.returncode != 0: |
| print(" Warning: mmproj conversion may have failed, trying without...") |
| result = subprocess.run( |
| ["python", convert_script, merged_dir, "--outfile", gguf_fp16, "--outtype", "f16"], |
| check=True, capture_output=True, text=True |
| ) |
| print(result.stdout) |
|
|
| print(f" FP16 GGUF created") |
|
|
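
# The converter may write the mmproj file to the current working directory rather
# than into gguf_output_dir; if so, move it over before listing and uploading.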
mmproj_files = glob.glob(f"{gguf_output_dir}/mmproj*.gguf")
if not mmproj_files:
    mmproj_files = glob.glob("mmproj*.gguf")
    if mmproj_files:
        for f in mmproj_files:
            shutil.move(f, gguf_output_dir)
        mmproj_files = glob.glob(f"{gguf_output_dir}/mmproj*.gguf")

print(f"\n Files in output dir:")
for f in os.listdir(gguf_output_dir):
    size_gb = os.path.getsize(f"{gguf_output_dir}/{f}") / (1024**3)
    print(f" {f}: {size_gb:.2f} GB")

print("\n[4/7] Building quantize tool...")
os.makedirs("/tmp/llama.cpp/build", exist_ok=True)

subprocess.run(
    ["cmake", "-B", "/tmp/llama.cpp/build", "-S", "/tmp/llama.cpp", "-DGGML_CUDA=OFF"],
    check=True, capture_output=True, text=True
)
subprocess.run(
    ["cmake", "--build", "/tmp/llama.cpp/build", "--target", "llama-quantize", "-j", "4"],
    check=True, capture_output=True, text=True
)
print(" Quantize tool built")

quantize_bin = "/tmp/llama.cpp/build/bin/llama-quantize"

print("\n[5/7] Creating quantized versions...")
quant_formats = [
    ("Q4_K_M", "4-bit medium"),
    ("Q5_K_M", "5-bit medium"),
    ("Q8_0", "8-bit"),
]
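
# Only the language-model GGUF is quantized here; the mmproj projector file stays
# in f16 and is uploaded unchanged by the loop in step 6.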
quantized_files = []
for quant_type, desc in quant_formats:
    print(f" Creating {quant_type} ({desc})...")
    quant_file = f"{gguf_output_dir}/{MODEL_NAME}-{quant_type.lower()}.gguf"
    result = subprocess.run([quantize_bin, gguf_fp16, quant_file, quant_type], capture_output=True, text=True)
    if result.returncode == 0:
        size_gb = os.path.getsize(quant_file) / (1024**3)
        print(f" {quant_type}: {size_gb:.2f} GB")
        quantized_files.append((quant_file, quant_type))
    else:
        print(f" {quant_type}: FAILED - {result.stderr}")

print("\n[6/7] Uploading to Hugging Face Hub...")
api = HfApi(token=HF_TOKEN)

for f in os.listdir(gguf_output_dir):
    if f.endswith(".gguf"):
        filepath = f"{gguf_output_dir}/{f}"
        print(f" Uploading {f}...")
        api.upload_file(
            path_or_fileobj=filepath,
            path_in_repo=f,
            repo_id=OUTPUT_REPO,
        )

print("\n[7/7] Creating model info...")
info_content = f"""
## {MODEL_NAME}

Vision/Multimodal model converted to GGUF.

**Source:** {MODEL_PATH}
**Base:** {BASE_MODEL if BASE_MODEL else "N/A"}

### Files
- `{MODEL_NAME}-f16.gguf` - Full precision
- `{MODEL_NAME}-q8_0.gguf` - 8-bit quantized
- `{MODEL_NAME}-q5_k_m.gguf` - 5-bit quantized
- `{MODEL_NAME}-q4_k_m.gguf` - 4-bit quantized (recommended)
- `mmproj-*.gguf` - Vision projector (required for image input)

### Usage with llama.cpp
```bash
llama-mtmd-cli -m {MODEL_NAME}-q4_k_m.gguf --mmproj mmproj-{MODEL_NAME}-f16.gguf --image your_image.jpg
```
"""

# Append to an existing README if the repo already has one, otherwise start a new one.
try:
    existing = api.hf_hub_download(OUTPUT_REPO, "README.md")
    with open(existing) as f:
        content = f.read()
    content += "\n" + info_content
except Exception:
    content = f"# {OUTPUT_REPO.split('/')[-1]}\n\nGGUF model collection.\n" + info_content

api.upload_file(
    path_or_fileobj=content.encode(),
    path_in_repo="README.md",
    repo_id=OUTPUT_REPO,
)

print("\n" + "=" * 60)
print(f"CONVERSION COMPLETE: {MODEL_NAME}")
print(f"Repository: https://huggingface.co/{OUTPUT_REPO}")
print("=" * 60)