File size: 745 Bytes
72f4d4d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 |
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch
def evaluate_model(model_path, test_sentences, max_new_tokens=50):
    """Generate a completion for each test sentence with a fine-tuned causal LM.

    Loads the tokenizer and model from *model_path*, builds a text-generation
    pipeline, and prints the input/output pair for every sentence.

    Args:
        model_path: Hugging Face model directory or hub id of the fine-tuned model.
        test_sentences: Iterable of prompt strings to evaluate.
        max_new_tokens: Number of tokens to generate per prompt (default 50).
    """
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForCausalLM.from_pretrained(model_path)
    # GPT-style causal LMs frequently ship without a pad token; generation
    # warns (or fails when batching) unless one is set, so fall back to EOS.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
    for sentence in test_sentences:
        # max_new_tokens counts only *generated* tokens; the old max_length=50
        # included the prompt, so long prompts silently got truncated output.
        output = generator(sentence, max_new_tokens=max_new_tokens, num_return_sequences=1)
        print(f"Input: {sentence}\nOutput: {output[0]['generated_text']}\n")
if __name__ == "__main__":
    # Smoke-test the fine-tuned checkpoint on a couple of prompts.
    prompts = [
        "How does fine-tuning work?",
        "Explain parameter-efficient methods like LoRA.",
    ]
    evaluate_model("models/llm-finetuned", prompts)
|