```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

MODEL_DIR = "path_to_HF_format_folder"

tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(MODEL_DIR, trust_remote_code=True)

# Move the model to the GPU if one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()

prompt = "Your prompt here."
inputs = tokenizer(prompt, return_tensors="pt").to(device)

# Adjust the generation parameters as required
with torch.no_grad():
    outputs = model.generate(
        **inputs,
        max_new_tokens=100,      # upper bound on newly generated tokens
        do_sample=True,          # sample instead of greedy decoding
        temperature=0.7,         # lower values make output more deterministic
        top_k=50,                # restrict sampling to the 50 most likely tokens
        top_p=0.95,              # nucleus sampling threshold
        repetition_penalty=1.1,  # penalize tokens that already appeared
        num_return_sequences=1,
    )

generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
print("Generated Text:\n", generated_text)
```