#!/usr/bin/env python3
"""
Agent Zero Model Diagnostics — Tests loading each model from the catalog.
Run this on CPU to identify config/tokenizer issues before deploying to ZeroGPU.
"""
import os
import sys
import json
import traceback
from typing import Dict, Any
# Install deps
import subprocess
subprocess.run([sys.executable, "-m", "pip", "install", "-q",
                "transformers>=4.52.0", "accelerate>=0.30.0", "torch", "huggingface-hub>=0.25.0"],
               capture_output=True)
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    AutoProcessor,
    AutoModelForImageTextToText,
    AutoConfig,
)
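# Note: the AutoModel* classes imported above are not exercised by this script; it only
# reads configs and tokenizers, never model weights. Importing them anyway (an assumption
# about intent) means any import-time breakage surfaces here on CPU rather than on ZeroGPU.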
from huggingface_hub import HfApi
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    print("❌ ERROR: HF_TOKEN not set!")
    sys.exit(1)
print(f"✅ HF_TOKEN present (length: {len(HF_TOKEN)})")
print(f"✅ PyTorch version: {torch.__version__}")
print(f"✅ CUDA available: {torch.cuda.is_available()}")
import transformers
print(f"✅ Transformers version: {transformers.__version__}")
# Model catalog
MODELS = {
    "chatgpt5-494m": {
        "repo": "ScottzillaSystems/ChatGPT-5",
        "architecture": "causal_lm",
        "size": "494M",
    },
    "qwen3.5-9b-opus": {
        "repo": "ScottzillaSystems/Huihui-Qwen3.5-9B-Claude-4.6-Opus-abliterated",
        "architecture": "conditional_gen",
        "size": "9.6B",
    },
    "supergemma4-7.5b": {
        "repo": "ScottzillaSystems/supergemma4-e4b-abliterated",
        "architecture": "conditional_gen",
        "size": "7.5B",
    },
    "cydonia-24b": {
        "repo": "ScottzillaSystems/Cydonia-24B-v4.1",
        "architecture": "causal_lm",
        "size": "24B",
    },
    "qwen3.6-27b": {
        "repo": "ScottzillaSystems/Huihui-Qwen3.6-27B-abliterated",
        "architecture": "conditional_gen",
        "size": "27.8B",
    },
    "qwen3-vl-8b": {
        "repo": "ScottzillaSystems/Huihui-Qwen3-VL-8B-Instruct-abliterated",
        "architecture": "conditional_gen",
        "size": "8.8B",
    },
    "qwen3.5-9b-base": {
        "repo": "ScottzillaSystems/Qwen3.5-9B",
        "architecture": "conditional_gen",
        "size": "9.6B",
    },
}
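# Optional pre-check (a minimal sketch added here, not part of the original test flow):
# confirm each catalog repo is reachable with the provided token before the per-model
# tests run, using the already-imported HfApi.
api = HfApi(token=HF_TOKEN)
for _info in MODELS.values():
    try:
        api.model_info(_info["repo"])
        print(f"✅ Repo reachable: {_info['repo']}")
    except Exception as e:
        print(f"⚠️ Repo check failed for {_info['repo']}: {type(e).__name__}: {e}")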
results = {}
print("\n" + "=" * 80)
print("PHASE 1: Check model configs (no download, just metadata)")
print("=" * 80)
for key, model_info in MODELS.items():
    repo = model_info["repo"]
    print(f"\n{'─' * 60}")
    print(f"Testing: {key} ({repo})")
    print(f"{'─' * 60}")
    result = {"repo": repo, "config_ok": False, "tokenizer_ok": False,
              "chat_template_ok": False, "errors": []}
    # Test 1: Load config
    try:
        config = AutoConfig.from_pretrained(repo, trust_remote_code=True, token=HF_TOKEN)
        arch = config.architectures[0] if hasattr(config, 'architectures') and config.architectures else "unknown"
        model_type = getattr(config, 'model_type', 'unknown')
        print(f" ✅ Config loaded: arch={arch}, model_type={model_type}")
        result["config_ok"] = True
        result["architecture_actual"] = arch
        result["model_type"] = model_type
    except Exception as e:
        print(f" ❌ Config FAILED: {type(e).__name__}: {e}")
        result["errors"].append(f"Config: {type(e).__name__}: {e}")
        results[key] = result
        continue
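    # Optional extra signal (a hedged addition, not one of the original tests): multimodal
    # configs usually nest a text_config, so recording its presence can help interpret
    # AutoProcessor vs AutoTokenizer behaviour in Test 2 below.
    result["has_text_config"] = hasattr(config, "text_config")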
    # Test 2: Load tokenizer/processor
    try:
        if model_info["architecture"] == "conditional_gen":
            tokenizer = AutoProcessor.from_pretrained(repo, trust_remote_code=True, token=HF_TOKEN)
            print(f" ✅ AutoProcessor loaded")
        else:
            tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True, token=HF_TOKEN)
            if tokenizer.pad_token is None:
                tokenizer.pad_token = tokenizer.eos_token
            print(f" ✅ AutoTokenizer loaded")
        result["tokenizer_ok"] = True
        result["tokenizer_type"] = type(tokenizer).__name__
    except Exception as e:
        print(f" ❌ Tokenizer/Processor FAILED: {type(e).__name__}: {e}")
        traceback.print_exc()
        result["errors"].append(f"Tokenizer: {type(e).__name__}: {e}")
        # Try alternative loading
        print(f" 🔄 Trying alternative loading...")
        try:
            if model_info["architecture"] == "conditional_gen":
                tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True, token=HF_TOKEN)
                print(f" ⚠️ AutoTokenizer works instead of AutoProcessor!")
                result["tokenizer_ok"] = True
                result["tokenizer_type"] = f"FALLBACK: {type(tokenizer).__name__}"
                result["errors"].append("AutoProcessor failed but AutoTokenizer works")
            else:
                tokenizer = AutoProcessor.from_pretrained(repo, trust_remote_code=True, token=HF_TOKEN)
                print(f" ⚠️ AutoProcessor works instead of AutoTokenizer!")
                result["tokenizer_ok"] = True
                result["tokenizer_type"] = f"FALLBACK: {type(tokenizer).__name__}"
        except Exception as e2:
            print(f" ❌ Alternative also FAILED: {type(e2).__name__}: {e2}")
            result["errors"].append(f"Alt tokenizer: {type(e2).__name__}: {e2}")
    # Test 3: Chat template
    if result["tokenizer_ok"]:
        try:
            test_messages = [
                {"role": "user", "content": "Hello, how are you?"}
            ]
            text = tokenizer.apply_chat_template(
                test_messages, tokenize=False, add_generation_prompt=True
            )
            print(f" ✅ Chat template works (output length: {len(text)} chars)")
            print(f" First 200 chars: {repr(text[:200])}")
            result["chat_template_ok"] = True
            result["chat_template_sample"] = text[:200]
        except Exception as e:
            print(f" ❌ Chat template FAILED: {type(e).__name__}: {e}")
            traceback.print_exc()
            result["errors"].append(f"Chat template: {type(e).__name__}: {e}")
    # Test 4: Tokenization
    if result["tokenizer_ok"] and result["chat_template_ok"]:
        try:
            if model_info["architecture"] == "conditional_gen":
                inputs = tokenizer(text=[text], return_tensors="pt", padding=True)
            else:
                inputs = tokenizer(text, return_tensors="pt", padding=True)
            tensor_keys = [k for k in inputs.keys() if hasattr(inputs[k], 'shape')]
            for k in tensor_keys:
                print(f" ✅ Input '{k}': shape={inputs[k].shape}, dtype={inputs[k].dtype}")
            result["tokenization_ok"] = True
        except Exception as e:
            print(f" ❌ Tokenization FAILED: {type(e).__name__}: {e}")
            traceback.print_exc()
            result["errors"].append(f"Tokenization: {type(e).__name__}: {e}")
            result["tokenization_ok"] = False
    # Test 5: Check which Auto class would load this model
    try:
        # Detect which class transformers would use
        if arch in ["Qwen2ForCausalLM", "MistralForCausalLM", "LlamaForCausalLM"]:
            result["recommended_loader"] = "AutoModelForCausalLM"
        elif "ForConditionalGeneration" in arch or "ForImageTextToText" in arch:
            result["recommended_loader"] = "AutoModelForImageTextToText"
        else:
            result["recommended_loader"] = f"Unknown for {arch}"
        print(f" ℹ️ Recommended loader: {result['recommended_loader']}")
    except Exception:
        pass
    results[key] = result
# Summary
print("\n\n" + "=" * 80)
print("SUMMARY")
print("=" * 80)
for key, r in results.items():
    status_parts = []
    if r["config_ok"]:
        status_parts.append("config✅")
    else:
        status_parts.append("config❌")
    if r.get("tokenizer_ok"):
        status_parts.append("tokenizer✅")
    else:
        status_parts.append("tokenizer❌")
    if r.get("chat_template_ok"):
        status_parts.append("chat_tmpl✅")
    else:
        status_parts.append("chat_tmpl❌")
    if r.get("tokenization_ok"):
        status_parts.append("tokenize✅")
    else:
        status_parts.append("tokenize❌")
    status = " | ".join(status_parts)
    emoji = "✅" if all([r["config_ok"], r.get("tokenizer_ok"), r.get("chat_template_ok"), r.get("tokenization_ok")]) else "❌"
    print(f" {emoji} {key}: {status}")
    if r.get("errors"):
        for err in r["errors"]:
            print(f" └─ {err}")
    if r.get("recommended_loader"):
        print(f" └─ Loader: {r['recommended_loader']}")
# Dump full results as JSON
print("\n\n" + "=" * 80)
print("FULL RESULTS JSON:")
print("=" * 80)
print(json.dumps(results, indent=2, default=str))
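# Optional follow-up (a sketch; the filename and exit-code convention are assumptions of
# this addition, not required by the Space): persist the results so runs can be compared,
# and exit non-zero if any model failed so an automated check can flag it.
with open("diagnostic_results.json", "w") as f:
    json.dump(results, f, indent=2, default=str)
print("Results written to diagnostic_results.json")
failed = [k for k, r in results.items()
          if not all([r["config_ok"], r.get("tokenizer_ok"),
                      r.get("chat_template_ok"), r.get("tokenization_ok")])]
sys.exit(1 if failed else 0)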