# ADAPT-Chase's picture
# Add files using upload-large-folder tool
# 3e626a5 verified
#!/usr/bin/env python3
"""
Test script to verify the trained model works correctly.
"""
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import time
def test_model_loading(model_path="/home/x/adaptai/experiments/qwen3-8b-elizabeth-simple/"):
    """Load the tokenizer and model from a checkpoint directory and report timing.

    Args:
        model_path: Directory containing the saved model and tokenizer files.
            Defaults to the original hard-coded checkpoint location so existing
            callers are unaffected.

    Returns:
        A ``(model, tokenizer)`` tuple ready for inference.
    """
    print("πŸ§ͺ Testing model loading...")

    # Load tokenizer first; it is cheap and fails fast on a bad path.
    print("πŸ“₯ Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    print("βœ… Tokenizer loaded successfully!")

    # Load model weights; this is the slow step, so time it.
    print("πŸ“₯ Loading model...")
    start_time = time.time()
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.bfloat16,   # half-precision to fit an 8B model in memory
        device_map="auto",            # let accelerate place layers on available devices
        trust_remote_code=True
    )
    load_time = time.time() - start_time
    print(f"βœ… Model loaded successfully in {load_time:.2f} seconds!")

    # Sanity-check device placement and dtype.
    print(f"πŸ“Š Model device: {model.device}")
    print(f"πŸ“Š Model dtype: {model.dtype}")
    return model, tokenizer
def test_inference(model, tokenizer, prompt="Hello, how are you today?", max_new_tokens=50):
    """Run a single sampled generation and print the decoded response.

    Args:
        model: A causal LM already placed on its target device(s).
        tokenizer: The matching tokenizer.
        prompt: Text to generate from. Defaults to the original test prompt,
            so existing callers behave identically.
        max_new_tokens: Generation budget. Defaults to the original value of 50.

    Returns:
        The decoded response string (prompt + completion, special tokens stripped).
    """
    print("\nπŸ§ͺ Testing inference...")
    print(f"πŸ“ Prompt: {prompt}")

    # Tokenize and move tensors to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    print(f"πŸ”’ Input tokens: {inputs.input_ids.shape}")

    # Generate under no_grad: inference only, no autograd bookkeeping.
    start_time = time.time()
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id  # silence missing-pad warning
        )
    gen_time = time.time() - start_time

    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    print(f"βœ… Generation completed in {gen_time:.2f} seconds!")
    print(f"πŸ’¬ Response: {response}")
    return response
def test_tool_use_capability(model, tokenizer):
    """Probe whether the model emits tool-call-like text for a tool prompt.

    Generates from a fixed calculator prompt at low temperature and reports
    True when the response mentions a tool-related keyword, False otherwise.
    """
    print("\nπŸ§ͺ Testing tool use capability...")
    tool_prompt = """Please help me calculate the square root of 144 using the calculator tool.
Available tools:
- calculator: Performs mathematical calculations
Please respond with the tool call format."""
    print(f"πŸ“ Tool prompt: {tool_prompt[:100]}...")

    encoded = tokenizer(tool_prompt, return_tensors="pt").to(model.device)

    # Low temperature keeps the probe output relatively focused.
    with torch.no_grad():
        generated = model.generate(
            **encoded,
            max_new_tokens=100,
            do_sample=True,
            temperature=0.3,
            pad_token_id=tokenizer.eos_token_id
        )

    response = tokenizer.decode(generated[0], skip_special_tokens=True)
    print(f"πŸ’¬ Tool use response: {response}")

    # Heuristic check: any tool-related keyword counts as a positive signal.
    lowered = response.lower()
    detected = any(keyword in lowered for keyword in ("calculator", "tool"))
    if detected:
        print("βœ… Tool use capability detected!")
    else:
        print("⚠️ Tool use pattern not clearly detected")
    return detected
if __name__ == "__main__":
    # Entry point: run all three checks in order and print a summary.
    banner = "=" * 60
    print(banner)
    print("πŸ€– Qwen3-8B-Elizabeth-Simple Model Test")
    print(banner)
    try:
        model, tokenizer = test_model_loading()
        test_inference(model, tokenizer)
        tool_use_detected = test_tool_use_capability(model, tokenizer)

        # Summary block mirrors each test's outcome.
        tool_status = "Detected" if tool_use_detected else "Needs verification"
        print("\n" + banner)
        print("πŸŽ‰ Model Test Summary:")
        print(" βœ… Model loading: Successful")
        print(" βœ… Basic inference: Working")
        print(f" βœ… Tool use capability: {tool_status}")
        print(" πŸ“ Model is ready for deployment!")
        print(banner)
    except Exception as e:
        # Report and re-raise so the process exits non-zero on failure.
        print(f"\n❌ Test failed with error: {e}")
        raise