File size: 3,931 Bytes
3e626a5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 |
#!/usr/bin/env python3
"""
Test script to verify the trained model works correctly.
"""
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import time
def test_model_loading():
"""Test that the model loads successfully."""
print("π§ͺ Testing model loading...")
model_path = "/home/x/adaptai/experiments/qwen3-8b-elizabeth-simple/"
# Load tokenizer
print("π₯ Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
print("β
Tokenizer loaded successfully!")
# Load model
print("π₯ Loading model...")
start_time = time.time()
model = AutoModelForCausalLM.from_pretrained(
model_path,
torch_dtype=torch.bfloat16,
device_map="auto",
trust_remote_code=True
)
load_time = time.time() - start_time
print(f"β
Model loaded successfully in {load_time:.2f} seconds!")
# Check device placement
print(f"π Model device: {model.device}")
print(f"π Model dtype: {model.dtype}")
return model, tokenizer
def test_inference(model, tokenizer):
"""Test basic inference."""
print("\nπ§ͺ Testing inference...")
# Test prompt
prompt = "Hello, how are you today?"
print(f"π Prompt: {prompt}")
# Tokenize
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
print(f"π’ Input tokens: {inputs.input_ids.shape}")
# Generate
start_time = time.time()
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=50,
do_sample=True,
temperature=0.7,
pad_token_id=tokenizer.eos_token_id
)
gen_time = time.time() - start_time
# Decode
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(f"β
Generation completed in {gen_time:.2f} seconds!")
print(f"π¬ Response: {response}")
return response
def test_tool_use_capability(model, tokenizer):
"""Test tool use capability."""
print("\nπ§ͺ Testing tool use capability...")
tool_prompt = """Please help me calculate the square root of 144 using the calculator tool.
Available tools:
- calculator: Performs mathematical calculations
Please respond with the tool call format."""
print(f"π Tool prompt: {tool_prompt[:100]}...")
inputs = tokenizer(tool_prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=100,
do_sample=True,
temperature=0.3,
pad_token_id=tokenizer.eos_token_id
)
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(f"π¬ Tool use response: {response}")
# Check if response contains tool-like patterns
if "calculator" in response.lower() or "tool" in response.lower():
print("β
Tool use capability detected!")
return True
else:
print("β οΈ Tool use pattern not clearly detected")
return False
if __name__ == "__main__":
print("=" * 60)
print("π€ Qwen3-8B-Elizabeth-Simple Model Test")
print("=" * 60)
try:
model, tokenizer = test_model_loading()
test_inference(model, tokenizer)
tool_use_detected = test_tool_use_capability(model, tokenizer)
print("\n" + "=" * 60)
print("π Model Test Summary:")
print(f" β
Model loading: Successful")
print(f" β
Basic inference: Working")
print(f" β
Tool use capability: {'Detected' if tool_use_detected else 'Needs verification'}")
print(" π Model is ready for deployment!")
print("=" * 60)
except Exception as e:
print(f"\nβ Test failed with error: {e}")
raise |