Spaces:
Sleeping
Sleeping
from fastapi import FastAPI
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch

# Create the FastAPI app instance.
app = FastAPI()

# Load tokenizer and model once at import time so every request reuses them
# instead of re-downloading/re-initializing per call.
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")

# Pipeline-style device index: 0 = first GPU, -1 = CPU.
device = 0 if torch.cuda.is_available() else -1
# BUGFIX: the original computed `device` but never moved the model, so
# inference always ran on CPU. Move it when a GPU is present; the CPU
# path is unchanged.
if device >= 0:
    model = model.to("cuda")
# Root endpoint: returns a welcome message.
# BUGFIX: the function was never registered as a route, so GET / returned
# 404 and the welcome message was unreachable.
@app.get("/")
def home():
    """Return a JSON welcome message for the API root."""
    return {"message": "Welcome to the Text Generation API!"}
# Define the handler for GET requests at /generate.
# BUGFIX: the comment promised a GET endpoint at /generate, but the function
# carried no route decorator and was never reachable over HTTP.
@app.get("/generate")
def generate(text: str):
    """Generate text from the prompt *text* with FLAN-T5-small.

    Returns a JSON object {"output": <generated text>}.
    """
    # Tokenize the prompt into a PyTorch tensor of input IDs.
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    # Generate up to 100 tokens (default greedy decoding).
    outputs = model.generate(input_ids, max_length=100)
    # Decode the first (only) sequence, dropping special tokens like </s>.
    output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Return the generated text as a JSON response.
    return {"output": output_text}