from fastapi import FastAPI
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
# Create FastAPI app instance
app = FastAPI()
# Load the Flan-T5 model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")
# Move the model to the GPU if one is available
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
@app.get("/")
def home():
    return {"message": "Welcome to the Text Generation API!"}
# Define a function to handle GET requests at /generate
@app.get("/generate")
def generate(text: str):
    # Generate text using the model
    input_ids = tokenizer(text, return_tensors="pt").input_ids.to(device)
    outputs = model.generate(input_ids, max_length=100)
    output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Return the generated text in a JSON response
    return {"output": output_text}
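
# Optional entry point: a minimal sketch for running the API directly with
# `python app.py` instead of the uvicorn CLI. It assumes uvicorn is installed
# alongside fastapi; port 7860 is the usual Hugging Face Spaces port and is an
# assumption here, not something the code above requires.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)

# Example request once the server is running (the prompt is only an illustration):
#   curl "http://127.0.0.1:7860/generate?text=Translate+to+German:+Hello"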