# (extraction artifact removed: file-size/commit-hash/line-number dump was not valid Python)
#!/usr/bin/env python3
"""
Upload Bengali AI using Hugging Face CLI with environment token
"""
import os
import subprocess
def upload_with_hf_cli(repo_id="megharudushi/Sheikh", model_dir="ready_bengali_ai"):
    """Upload a local model directory to the Hugging Face Hub via the ``hf`` CLI.

    Args:
        repo_id: Target Hub repository in ``user/name`` form.
        model_dir: Local directory containing the model files to upload.

    Returns:
        True if the CLI upload succeeded, False otherwise (missing directory,
        missing HF_TOKEN, CLI failure, or CLI not installed).
    """
    print("🚀 Uploading Bengali AI with Hugging Face CLI")
    print("=" * 50)
    print(f"Repository: {repo_id}")

    # Must be a directory — we listdir() it below, so isdir (not exists) is the
    # correct precondition.
    if not os.path.isdir(model_dir):
        print(f"❌ Error: {model_dir} directory not found!")
        return False

    # Show files to upload, with sizes in MB.
    files = sorted(os.listdir(model_dir))
    print(f"\n📁 Files to upload ({len(files)} total):")
    total_size = 0.0
    for name in files:
        size = os.path.getsize(os.path.join(model_dir, name)) / (1024 * 1024)
        total_size += size
        print(f" 📄 {name} ({size:.1f}MB)")
    print(f"📊 Total size: {total_size:.1f}MB")

    # The hf CLI authenticates via the HF_TOKEN environment variable.
    token = os.environ.get('HF_TOKEN')
    if not token:
        print("\n❌ No HF_TOKEN environment variable found!")
        print("\n🔧 To upload, you need to:")
        print("1. Get your token from: https://huggingface.co/settings/tokens")
        print("2. Set environment variable:")
        print(" export HF_TOKEN=your_token_here")
        print(f"3. Then run: hf upload {repo_id} .")
        return False
    print(f"\n✅ Using token: {token[:8]}...")

    try:
        # BUGFIX: the original ran `hf upload <repo> .`, which uploads the
        # current working directory — not the model_dir whose files were just
        # listed. Upload model_dir itself. The child process inherits
        # os.environ (including HF_TOKEN) by default, so no env copy is needed.
        cmd = ["hf", "upload", repo_id, model_dir]
        print(f"\n📤 Running: {' '.join(cmd)}")
        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.returncode == 0:
            print("\n🎉 SUCCESS!")
            print(f"🌐 Your model: https://huggingface.co/{repo_id}")
            print("\n💡 Anyone can now use your model:")
            print("from transformers import AutoTokenizer, AutoModelForCausalLM")
            print(f'tokenizer = AutoTokenizer.from_pretrained("{repo_id}")')
            print(f'model = AutoModelForCausalLM.from_pretrained("{repo_id}")')
            return True
        print(f"❌ Upload failed: {result.stderr}")
        return False
    except (OSError, subprocess.SubprocessError) as e:
        # OSError covers FileNotFoundError when the `hf` CLI is not installed.
        print(f"❌ Upload error: {e}")
        return False
def create_upload_commands(path="HF_CLI_COMMANDS.md"):
    """Write a markdown cheat-sheet of manual `hf` CLI upload commands.

    Args:
        path: Destination file for the generated markdown
            (default: ``HF_CLI_COMMANDS.md`` in the current directory).
    """
    commands = """# Hugging Face CLI Upload Commands
## Step 1: Get Your Token
1. Go to: https://huggingface.co/settings/tokens
2. Create a new token with "Write" permissions
3. Copy the token (starts with hf_)
## Step 2: Set Token and Upload
### Option A: Set environment variable
```bash
export HF_TOKEN=your_token_here
hf upload megharudushi/Sheikh .
```
### Option B: Pass token directly
```bash
hf upload megharudushi/Sheikh . --token your_token_here
```
### Option C: Login interactively
```bash
hf auth login
# Enter your token when prompted
hf upload megharudushi/Sheikh .
```
## Step 3: Verify Upload
After upload, visit: https://huggingface.co/megharudushi/Sheikh
## Files Being Uploaded
- model.bin (1.4GB) - Main model weights
- tokenizer.json (3.4MB) - Tokenizer configuration
- vocab.json (780KB) - Vocabulary
- merges.txt (446KB) - BPE merges
- config.json (13KB) - Model configuration
- Plus 6 other configuration files
"""
    with open(path, "w", encoding="utf-8") as f:
        f.write(commands)
    print(f"📄 Created: {path}")
if __name__ == "__main__":
print("🇧🇩 BANGLI AI - HUGGING FACE CLI UPLOAD")
print("=" * 45)
# Create commands file
create_upload_commands()
# Try upload
success = upload_with_hf_cli()
if not success:
print("\n🔧 Manual upload required:")
print("1. Set HF_TOKEN environment variable")
print("2. Run: hf upload megharudushi/Sheikh .")
print("\n📖 See HF_CLI_COMMANDS.md for detailed instructions")
else:
print("\n🎊 Upload successful! Your Bengali AI is live!") |