import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model from the current directory.
# from_pretrained discovers tokenizer_config.json, vocab.json, and
# model.safetensors automatically; they are not passed as kwargs.
tokenizer = AutoTokenizer.from_pretrained('./')
model = AutoModelForCausalLM.from_pretrained('./')
model.eval()

def test_model(input_text, expected_output):
    inputs = tokenizer(input_text, return_tensors='pt')
    with torch.no_grad():
        outputs = model.generate(**inputs)
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    assert generated_text == expected_output, f"Expected: {expected_output}, but got: {generated_text}"

test_model("Hello, how are you?", "Hello, how are you?")
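
# Note: model.generate() returns the prompt tokens plus any continuation,
# so the exact-match assertion above only passes if the model emits EOS
# immediately after the prompt. A minimal sketch of a more tolerant prefix
# check, assuming the tokenizer decodes the prompt back to the original
# string (test_model_prefix and max_new_tokens=20 are illustrative choices,
# not part of the original test):
def test_model_prefix(input_text, expected_prefix):
    inputs = tokenizer(input_text, return_tensors='pt')
    with torch.no_grad():
        # Cap the continuation so the test stays fast and deterministic
        # under the default greedy decoding.
        outputs = model.generate(**inputs, max_new_tokens=20)
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    assert generated_text.startswith(expected_prefix), \
        f"Expected prefix: {expected_prefix}, but got: {generated_text}"

test_model_prefix("Hello, how are you?", "Hello, how are you?")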