| from fastapi import FastAPI |
| from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer |
|
|
| |
# FastAPI application instance; the route handlers below register on it.
app = FastAPI()
|
|
| |
# Load the model and tokenizer once at import time so every request reuses
# the same pipeline instead of re-loading the weights per call.
model_name = "facebook/bart-large-cnn"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
# NOTE(review): "facebook/bart-large-cnn" is a checkpoint fine-tuned for
# summarization; the generic "text2text-generation" task does run any
# seq2seq model, but a "summarization" pipeline may match this checkpoint
# better — confirm the intended task before changing it.
pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
|
|
| |
@app.get("/generate")
def generate(text: str, max_length: int = 100):
    """
    Generate text from the input text using the BART seq2seq pipeline.

    Parameters:
    - text (str): The input text for text generation.
    - max_length (int): Maximum length of the generated sequence.
      Optional query parameter; defaults to 100 (the previous hard-coded value).

    Returns:
    - dict: A JSON response containing the generated text under "output".
    """
    # The pipeline returns a list of dicts, one per returned sequence; we
    # request exactly one sequence and read its 'generated_text' field.
    output = pipe(
        text,
        max_length=max_length,
        num_return_sequences=1,
        clean_up_tokenization_spaces=True,
    )
    generated_text = output[0]["generated_text"]
    return {"output": generated_text}
|
|
| |
@app.get("/")
def read_root():
    """
    Welcome to the Text Generation API!

    You can generate text by making a GET request to `/generate` with the query parameter `text`.

    Example usage:
    ```
    GET /generate?text=Once%20upon%20a%20time...
    ```
    """
    # Static welcome payload; the message string is part of the API contract.
    return {"message": "Welcome to the Text Generation API! Use `/generate` endpoint to generate text."}
|
|