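"""Lazily load a Hugging Face causal LM and generate text for a prompt."""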
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Lazily initialized globals so the model and tokenizer are loaded only once.
model = None
tokenizer = None

# Use the GPU when available; fall back to CPU otherwise.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def load_model():
    """Load the tokenizer and model on first use; later calls are no-ops."""
    global model, tokenizer
    if model is None:
        model_name = "adenorwer/aerwcr"
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        # Half precision is only reliable on GPU; fall back to float32 on CPU.
        dtype = torch.float16 if DEVICE == "cuda" else torch.float32
        model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=dtype).to(DEVICE)
        model.eval()  # inference mode: disables dropout


def predict(prompt: str, max_new_tokens: int = 50) -> str:
    """Generate a completion for `prompt` and return the decoded text."""
    load_model()
    inputs = tokenizer(prompt, return_tensors="pt").to(DEVICE)
    # max_new_tokens bounds only the generated continuation; the original
    # max_length would also have counted the prompt tokens against the limit.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
|
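# A sampled-generation variant (a sketch, not part of the original script):
# predict() above decodes greedily, which is deterministic. Passing sampling
# flags to `generate` yields more varied text; `predict_sampled` is a
# hypothetical helper name, and the temperature/top_p values are illustrative.
def predict_sampled(prompt: str, max_new_tokens: int = 50) -> str:
    """Like predict(), but samples with temperature instead of greedy decoding."""
    load_model()
    inputs = tokenizer(prompt, return_tensors="pt").to(DEVICE)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            do_sample=True,    # sample instead of taking the argmax token
            temperature=0.8,   # <1.0 sharpens, >1.0 flattens the distribution
            top_p=0.9,         # nucleus sampling over the top 90% probability mass
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)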
|
|
if __name__ == "__main__":
    # Simple smoke test: generate a completion for a sample prompt.
    result = predict("Hello, how are you?")
    print(result)