```python
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


class ModelHandler:
    def __init__(self):
        # Load the text-to-SQL model and tokenizer, moving the model to GPU if one is available.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = AutoModelForSeq2SeqLM.from_pretrained("shaheerzk/text_to_sql")
        self.tokenizer = AutoTokenizer.from_pretrained("shaheerzk/text_to_sql")
        self.model.to(self.device)

    def handle(self, inputs):
        # Pull the natural-language query out of the request payload and tokenize it
        # on the same device as the model.
        text = inputs.get("text", "")
        model_inputs = self.tokenizer(text, return_tensors="pt").to(self.device)

        # Generate the output sequence without tracking gradients.
        with torch.no_grad():
            outputs = self.model.generate(**model_inputs)

        # Decode the first generated sequence back into plain text.
        generated_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return {"generated_text": generated_text}
```
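A minimal usage sketch, assuming the handler is called directly with a `{"text": ...}` dictionary; the example prompt is illustrative only and not tied to any particular serving framework's request format.

```python
# Hypothetical direct invocation of ModelHandler for a quick local check.
handler = ModelHandler()
result = handler.handle({"text": "List the names of all customers who placed an order in 2023"})
print(result["generated_text"])  # prints the SQL string produced by the model
```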