"""
Upload Nursing LLM Model to Hugging Face
Run this script from wherever your trained model is saved (Colab, local machine, etc.)
"""
import os
import sys

from huggingface_hub import HfApi, create_repo, login
# =============================================================================
# CONFIGURATION
# =============================================================================
MODEL_NAME = "NurseCitizenDeveloper/nursing-llama-3-8b-fons"
LOCAL_MODEL_PATH = "./nursing-llama-3-8b-fons"  # Update this to your model's location
# Common locations to check:
# - Google Colab: "/content/nursing-llama-3-8b-fons"
# - Google Drive: "/content/drive/MyDrive/nursing-llama-3-8b-fons"
# - Local: "C:/Users/g0226/path/to/model"
# =============================================================================
# STEP 1: Login to Hugging Face
# =============================================================================
print("🔐 Logging in to Hugging Face...")
login()  # Prompts for your token, or uses the HF_TOKEN env variable if set
# =============================================================================
# STEP 2: Verify Model Files Exist
# =============================================================================
print(f"\n📁 Checking for model files in: {LOCAL_MODEL_PATH}")

# Tokenizer/config files expected alongside the weights.
required_files = [
    "config.json",
    "tokenizer_config.json",
    "tokenizer.json",
    "special_tokens_map.json",
]
# Model weights (at least one should exist).
weight_files = [
    "pytorch_model.bin",
    "model.safetensors",
    "adapter_model.safetensors",  # If using LoRA
    "adapter_config.json",        # If using LoRA
]

missing_files = [
    f for f in required_files
    if not os.path.exists(os.path.join(LOCAL_MODEL_PATH, f))
]
has_weights = any(
    os.path.exists(os.path.join(LOCAL_MODEL_PATH, f)) for f in weight_files
)

if missing_files:
    # Warn only — LoRA-adapter-only exports legitimately omit some of these.
    print(f"⚠️ Missing required files: {missing_files}")
if not has_weights:
    # Hard failure: there is nothing to upload without weight files.
    print(f"❌ No model weight files found! Need one of: {weight_files}")
    print("\n💡 If you trained with LoRA, make sure adapter files are present.")
    sys.exit(1)  # sys.exit is the correct way to abort a script (exit() is for the REPL)

print("✅ Model files verified!")
# =============================================================================
# STEP 3: Upload to Hugging Face
# =============================================================================
print(f"\n🚀 Uploading model to {MODEL_NAME}...")

api = HfApi()

# Create the repo if it doesn't exist (exist_ok=True makes this idempotent).
try:
    create_repo(MODEL_NAME, exist_ok=True, repo_type="model")
    print(f"✅ Repository ready: https://huggingface.co/{MODEL_NAME}")
except Exception as e:
    # Best-effort: creation may fail on permissions; upload_folder below
    # will surface any real error.
    print(f"ℹ️ Repository already exists or error: {e}")

# Upload every file in the local model folder in a single commit.
print("\n📤 Uploading files...")
api.upload_folder(
    folder_path=LOCAL_MODEL_PATH,
    repo_id=MODEL_NAME,
    repo_type="model",
    commit_message="Upload trained nursing LLM model",
)

print("\n✅ Upload complete!")
print(f"🔗 Model URL: https://huggingface.co/{MODEL_NAME}")
print("🔗 Space URL: https://huggingface.co/spaces/NurseCitizenDeveloper/relational-ai-nursing")
print("\n⏳ The Space should automatically restart and work now!")