File size: 1,713 Bytes
fca155a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
from huggingface_hub import list_repo_files, hf_hub_download
import os

# Hugging Face repo hosting pre-quantized GGUF builds of Qwen2-VL-2B-Instruct.
REPO_ID = "bartowski/Qwen2-VL-2B-Instruct-GGUF"
# Local directory where downloaded model files are saved.
MODEL_DIR = "models"

def list_files():
    """Return the list of file paths available in the Hub repo REPO_ID."""
    print(f"Inspecting repo: {REPO_ID}")
    return list_repo_files(repo_id=REPO_ID)

def download_file(filename):
    """Download *filename* from REPO_ID into MODEL_DIR.

    Args:
        filename: Path of the file within the Hub repo to fetch.
    """
    # Fix: the progress messages previously printed the literal text
    # "(unknown)" instead of interpolating the actual filename.
    print(f"Downloading {filename}...")
    hf_hub_download(
        repo_id=REPO_ID,
        filename=filename,
        local_dir=MODEL_DIR,
        # NOTE(review): deprecated and a no-op in recent huggingface_hub
        # releases; kept for compatibility with older installs that honor it.
        local_dir_use_symlinks=False
    )
    print(f"✅ Saved to {MODEL_DIR}/{filename}")

if __name__ == "__main__":
    # Make sure the local model directory exists before any download.
    os.makedirs(MODEL_DIR, exist_ok=True)

    files = list_files()

    # 1. Find the best Quantization (Q4_K_M is the sweet spot)
    target_quant = "q4_k_m.gguf"
    model_file = None
    for name in files:
        if target_quant in name.lower():
            model_file = name
            break

    if model_file is None:
        print("❌ Could not find Q4_K_M model file.")
    else:
        print(f"Found model: {model_file}")
        if os.path.exists(os.path.join(MODEL_DIR, model_file)):
            print("✅ Model already exists.")
        else:
            download_file(model_file)

    # 2. Look for mmproj (Vision Adapter) if it exists separately
    # Qwen2-VL GGUFs usually embed it, but let's check for 'mmproj' just in case.
    mmproj_file = None
    for name in files:
        if "mmproj" in name.lower():
            mmproj_file = name
            break

    if mmproj_file is None:
        print("ℹ️ No separate mmproj file found (likely embedded or not needed for this repo).")
    else:
        print(f"Found projector: {mmproj_file}")
        if os.path.exists(os.path.join(MODEL_DIR, mmproj_file)):
            print("✅ Projector already exists.")
        else:
            download_file(mmproj_file)