Hugging Face Spaces status: Sleeping
"""FastAPI service exposing a text-generation endpoint backed by FLAN-T5."""
from fastapi import FastAPI
# Fixed typo: the original imported from "trasformers".
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

# Create the FastAPI app instance.
app = FastAPI()

# Load the tokenizer and model once at startup; google/flan-t5-small is a
# small seq2seq model suitable for CPU-only demo deployments.
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")

# Text-to-text generation pipeline used by the /generate endpoint.
# The original imported `pipeline` but never constructed `pipe`, so the
# endpoint would raise NameError on first call.
pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
@app.get("/")  # was missing: without this decorator the route is never registered
def home():
    """Root endpoint: return a welcome message confirming the API is up."""
    return {"message": "Welcome to the Text Generation API!"}
@app.get("/generate")  # was missing: without this decorator the route is never registered
def generate(text: str):
    """Handle GET /generate: run the text-to-text pipeline on ``text``.

    Query parameter:
        text: the input prompt to feed to the model.

    Returns a JSON object ``{"output": <generated text>}``.
    """
    # NOTE(review): assumes a module-level `pipe` pipeline exists; the
    # original file imported `pipeline` but never constructed one.
    output = pipe(text)
    # pipeline("text2text-generation") returns a list of dicts with a
    # 'generated_text' key; take the first (and only) candidate.
    return {"output": output[0]["generated_text"]}