from transformers import AutoModelForCausalLM, AutoTokenizer
import torch


class ModelManager:
    def __init__(self, model_name="BanglaLLM/BanglaLLama-3.2-3b-unlop-culturax-base-v0.0.3"):
        # Prefer GPU when available; fall back to CPU otherwise.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        # Load weights in float16 on GPU to halve memory use; CPU inference needs float32.
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16 if self.device == "cuda" else torch.float32,
        )
        self.model = self.model.to(self.device)

    def generate_response(self, prompt, max_length, temperature, top_k, top_p):
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
        outputs = self.model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],  # pass the mask explicitly to avoid the missing-mask warning
            max_length=max_length,  # total token budget, including the prompt
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            do_sample=True,  # sampling must be on for temperature/top_k/top_p to take effect
            pad_token_id=self.tokenizer.eos_token_id,  # Llama-family tokenizers typically define no pad token
        )
        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
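
# A minimal usage sketch of the class above. The entry-point guard and the
# sampling values (max_length=128, temperature=0.7, top_k=50, top_p=0.9) are
# illustrative assumptions, not tuned settings from the original code.
if __name__ == "__main__":
    manager = ModelManager()
    reply = manager.generate_response(
        prompt="বাংলাদেশের রাজধানী কোথায়?",  # "Where is the capital of Bangladesh?"
        max_length=128,
        temperature=0.7,
        top_k=50,
        top_p=0.9,
    )
    print(reply)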