# handler.py
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Global variables to cache model and tokenizer
model = None
tokenizer = None

def load_model():
    global model, tokenizer
    if model is None:
        model_name = "adenorwer/aerwcr"  # or your actual model path
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype=torch.float16
        ).to("cuda")
        model.eval()

def predict(prompt: str, max_length: int = 50):
    load_model()
    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    # Note: max_length counts prompt tokens plus generated tokens;
    # use max_new_tokens instead to bound only the continuation.
    with torch.no_grad():  # no gradients needed for inference
        outputs = model.generate(**inputs, max_length=max_length)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
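
# Optional: generate() also accepts sampling controls such as max_new_tokens,
# do_sample, and temperature (standard transformers kwargs, not used in this
# handler), e.g.:
#   outputs = model.generate(**inputs, max_new_tokens=100,
#                            do_sample=True, temperature=0.7)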

# Example usage (for testing locally)
if __name__ == "__main__":
    result = predict("Hello, how are you?")
    print(result)
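
# Optional: a device-agnostic loading sketch for CPU-only machines, since the
# handler above hardcodes "cuda". This is an assumption-laden sketch, not part
# of the original handler; float16 is kept only on GPU because half-precision
# CPU inference is slow or unsupported:
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   dtype = torch.float16 if device == "cuda" else torch.float32
#   model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=dtype).to(device)
#   inputs = tokenizer(prompt, return_tensors="pt").to(device)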