| import torch | |
| from transformers import AutoModelForSeq2SeqLM, AutoTokenizer | |
# Model artifacts are loaded once at import time so every request reuses them.
MODEL_ID = "Vuks/sanchit_whisper"
# NOTE(review): the checkpoint name suggests a Whisper (speech-to-text) model;
# AutoModelForSeq2SeqLM/AutoTokenizer assume a text-to-text checkpoint — confirm.
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
def handle(request, context):
    """Serve one inference request: tokenize, generate, decode.

    Parameters
    ----------
    request : dict
        Must contain an ``"inputs"`` key holding the text to process
        (assumes ``str`` or ``list[str]`` — TODO confirm against the caller).
    context : object
        Serving-framework context object; unused here.

    Returns
    -------
    list[str]
        The decoded generated sequence for each input.
    """
    inputs = tokenizer(request["inputs"], return_tensors="pt")
    # Bug fix: a bare forward pass on a seq2seq model needs decoder inputs /
    # labels and returns a non-serializable ModelOutput. For inference, run
    # autoregressive decoding via generate() and skip gradient tracking.
    with torch.no_grad():
        output_ids = model.generate(**inputs)
    # Map token ids back to plain strings so the response is serializable.
    return tokenizer.batch_decode(output_ids, skip_special_tokens=True)