# NEON / upload_and_test.py
# Uploaded by picklefried706 using huggingface_hub (commit 40a9423, verified)
import os
from getpass import getpass

from huggingface_hub import login, upload_folder
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# -------------------------------
# 1️⃣ Login safely
# -------------------------------
# Prefer the HF_TOKEN environment variable so the token never appears in
# the script. Fall back to an interactive prompt; getpass (instead of
# input) keeps the secret from being echoed to the terminal or captured
# in scrollback.
token = os.environ.get("HF_TOKEN")
if not token:
    # NOTE: original used input(), which echoes the token on screen.
    token = getpass("Enter your Hugging Face token (won't be saved): ")
login(token=token)
# -------------------------------
# 2️⃣ Upload the model folder
# -------------------------------
# The current directory is expected to contain the model artifacts
# (pytorch_model.bin, config.json, tokenizer.json); everything in it is
# pushed to the model repo on the Hub.
upload_folder(
    repo_type="model",
    repo_id="picklefried706/NEON",  # your HF repo
    folder_path=".",  # current folder
)
print("✅ Upload complete!")
# -------------------------------
# 3️⃣ Test your model
# -------------------------------
# Pull the freshly uploaded weights back from the Hub and run a short
# generation to confirm the repo is usable end to end.
repo = "picklefried706/NEON"
tok = AutoTokenizer.from_pretrained(repo)
lm = AutoModelForCausalLM.from_pretrained(repo, device_map="auto")
generator = pipeline("text-generation", model=lm, tokenizer=tok)

demo_prompt = "User: Hello! How are you?\nAssistant:"
outputs = generator(demo_prompt, max_new_tokens=150)

print("\n--- Model Response ---")
# pipeline returns a list of dicts; 'generated_text' includes the prompt.
print(outputs[0]['generated_text'])