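# Smoke test for the fine-tuned model: load the Qwen2.5-0.5B-Instruct base,
# attach the PEFT adapter saved in ./qwen-codeforces-cots, and generate a
# step-by-step solution for a small sample problem.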
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

def test_model():
    base_model_name = "Qwen/Qwen2.5-0.5B-Instruct"
    adapter_path = "./qwen-codeforces-cots"
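
    # Load the tokenizer from the adapter directory so any chat-template or
    # special-token changes saved during fine-tuning are picked up.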
| print("Loading tokenizer...") | |
| tokenizer = AutoTokenizer.from_pretrained(adapter_path, trust_remote_code=True) | |
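
    # Load the frozen base weights; float32 keeps things simple on CPU
    # (use torch.float16 or bfloat16 on a GPU to save memory).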
| print("Loading base model...") | |
| base_model = AutoModelForCausalLM.from_pretrained( | |
| base_model_name, | |
| dtype=torch.float32, | |
| trust_remote_code=True, | |
| ) | |
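
    # PeftModel wraps the base model and applies the adapter weights on top.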
| print("Loading fine-tuned adapter...") | |
| model = PeftModel.from_pretrained(base_model, adapter_path) | |
| model.eval() | |

    # Test with a simple programming problem
    test_problem = """You are given an array a of n integers. Find the maximum element in the array.
Input format:
The first line contains an integer n (1 ≤ n ≤ 100).
The second line contains n integers a₁, a₂, ..., aₙ (1 ≤ aᵢ ≤ 1000).
Output format:
Print the maximum element."""

    messages = [
        {"role": "user", "content": f"Please reason step by step about the solution, then provide a complete implementation.\n\n# Problem\n\n{test_problem}"}
    ]
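
    # Render the conversation with the model's chat template;
    # add_generation_prompt appends the assistant header so the model
    # continues as the assistant.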
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
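
    # Tokenize the rendered prompt and move it to the model's device.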
    inputs = tokenizer(text, return_tensors="pt").to(model.device)
| print("\nGenerating response...") | |
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=512,
            temperature=0.7,
            do_sample=True,
            top_p=0.9,
        )
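
    # Decode only the newly generated tokens, skipping the echoed prompt.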
    response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
| print("\n" + "="*80) | |
| print("MODEL RESPONSE:") | |
| print("="*80) | |
| print(response) | |
| print("="*80) | |


if __name__ == "__main__":
    test_model()