#!/usr/bin/env python3
"""
Test script to verify the trained model works correctly.
"""
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import time


def test_model_loading():
    """Test that the model loads successfully."""
    print("🧪 Testing model loading...")

    model_path = "/home/x/adaptai/experiments/qwen3-8b-elizabeth-simple/"

    # Load tokenizer
    print("📥 Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    print("✅ Tokenizer loaded successfully!")

    # Load model
    print("📥 Loading model...")
    start_time = time.time()
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.bfloat16,
        device_map="auto",
        trust_remote_code=True
    )
    load_time = time.time() - start_time
    print(f"✅ Model loaded successfully in {load_time:.2f} seconds!")

    # Check device placement
    print(f"📊 Model device: {model.device}")
    print(f"📊 Model dtype: {model.dtype}")

    return model, tokenizer


def test_inference(model, tokenizer):
    """Test basic inference."""
    print("\n🧪 Testing inference...")

    # Test prompt
    prompt = "Hello, how are you today?"
    print(f"📝 Prompt: {prompt}")

    # Tokenize
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    print(f"🔢 Input tokens: {inputs.input_ids.shape}")

    # Generate
    start_time = time.time()
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=50,
            do_sample=True,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id
        )
    gen_time = time.time() - start_time

    # Decode
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    print(f"✅ Generation completed in {gen_time:.2f} seconds!")
    print(f"💬 Response: {response}")

    return response


def test_tool_use_capability(model, tokenizer):
    """Test tool use capability."""
    print("\n🧪 Testing tool use capability...")

    tool_prompt = """Please help me calculate the square root of 144 using the calculator tool.

Available tools:
- calculator: Performs mathematical calculations

Please respond with the tool call format."""

    print(f"📝 Tool prompt: {tool_prompt[:100]}...")

    inputs = tokenizer(tool_prompt, return_tensors="pt").to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=100,
            do_sample=True,
            temperature=0.3,
            pad_token_id=tokenizer.eos_token_id
        )

    # Decode only the newly generated tokens. Decoding the full sequence would
    # echo the prompt -- which itself contains "tool" and "calculator" -- and
    # make the keyword check below pass trivially.
    gen_tokens = outputs[0][inputs.input_ids.shape[-1]:]
    response = tokenizer.decode(gen_tokens, skip_special_tokens=True)
    print(f"💬 Tool use response: {response}")

    # Check whether the generated continuation contains tool-like patterns
    if "calculator" in response.lower() or "tool" in response.lower():
        print("✅ Tool use capability detected!")
        return True
    else:
        print("⚠️ Tool use pattern not clearly detected")
        return False


if __name__ == "__main__":
    print("=" * 60)
    print("🤖 Qwen3-8B-Elizabeth-Simple Model Test")
    print("=" * 60)

    try:
        model, tokenizer = test_model_loading()
        test_inference(model, tokenizer)
        tool_use_detected = test_tool_use_capability(model, tokenizer)

        print("\n" + "=" * 60)
        print("🎉 Model Test Summary:")
        print("   ✅ Model loading: Successful")
        print("   ✅ Basic inference: Working")
        print(f"   ✅ Tool use capability: {'Detected' if tool_use_detected else 'Needs verification'}")
        print("   📝 Model is ready for deployment!")
        print("=" * 60)
    except Exception as e:
        print(f"\n❌ Test failed with error: {e}")
        raise
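

# --- Optional: chat-template inference check (a sketch; not called above) -----
# Instruction-tuned Qwen checkpoints normally expect prompts wrapped with the
# tokenizer's chat template rather than raw text, so if the raw-prompt tests
# above produce odd output, a templated variant may match the training format
# better. The message content and sampling settings here are illustrative
# assumptions, not part of the original test plan.
def test_chat_template_inference(model, tokenizer):
    """Sketch: generate via tokenizer.apply_chat_template (assumes one is bundled)."""
    messages = [{"role": "user", "content": "Hello, how are you today?"}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            input_ids,
            max_new_tokens=50,
            do_sample=True,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Print only the continuation, not the templated prompt
    print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))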
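

# --- Optional: rough GPU memory report (a sketch using standard torch.cuda APIs) ---
# Handy to call after test_model_loading() to sanity-check that the bf16 8B
# weights (roughly 16 GB at 2 bytes/parameter) fit where device_map="auto"
# placed them. Not part of the original test flow.
def report_gpu_memory():
    """Sketch: print allocated/reserved memory per visible CUDA device."""
    if not torch.cuda.is_available():
        print("No CUDA devices visible; the model may have been placed on CPU.")
        return
    for i in range(torch.cuda.device_count()):
        allocated = torch.cuda.memory_allocated(i) / 1024**3
        reserved = torch.cuda.memory_reserved(i) / 1024**3
        print(f"GPU {i}: {allocated:.2f} GiB allocated, {reserved:.2f} GiB reserved")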
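

# --- Optional: stricter tool-call detection (a sketch) ------------------------
# The keyword check in test_tool_use_capability() is loose: any mention of
# "tool" or "calculator" in the continuation counts as a pass. The JSON-style
# pattern below is an assumed call format -- adjust the regex to whatever
# tool-call syntax the model was actually trained to emit.
import re

def looks_like_tool_call(text: str) -> bool:
    """Sketch: match a JSON-style call such as {"name": "calculator", ...}."""
    return bool(re.search(r'\{\s*"(?:tool|name)"\s*:\s*"calculator"', text))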