# handler.py
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Global variables to cache model and tokenizer
model = None
tokenizer = None


def load_model():
    global model, tokenizer
    if model is None:
        model_name = "adenorwer/aerwcr"  # or your actual model path
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype=torch.float16
        ).to("cuda")
        model.eval()


def predict(prompt: str, max_length: int = 50):
    load_model()
    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    outputs = model.generate(**inputs, max_length=max_length)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Example usage (for testing locally)
if __name__ == "__main__":
    result = predict("Hello, how are you?")
    print(result)
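

# Optional sketch (not part of the original handler): a variant that uses
# max_new_tokens, do_sample, and temperature, which are standard kwargs of
# transformers' generate(); the function name and default values below are
# assumptions for illustration only. Unlike max_length, max_new_tokens counts
# only newly generated tokens, not the prompt.
def predict_with_sampling(prompt: str, max_new_tokens: int = 50, temperature: float = 0.7):
    load_model()
    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    with torch.no_grad():  # disable gradient tracking during inference
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=temperature,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)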