# NOTE(review): the three lines below were Hugging Face Spaces status-page
# residue ("Spaces: / Sleeping / Sleeping") captured when this file was
# scraped; kept only as a comment so the file remains valid Python.
# Web framework: FastAPI app object plus Query for declaring query parameters.
from fastapi import FastAPI, Query
# Hugging Face factory that builds ready-to-use inference pipelines.
from transformers import pipeline

# Initialize FastAPI app — single application instance the route
# decorators below attach to.
app = FastAPI()
# Initialize text generation pipeline
def initialize_pipeline():
    """Build the text2text-generation pipeline backed by google/flan-t5-small.

    Returns:
        A Hugging Face ``pipeline`` callable. Model weights are downloaded /
        loaded here, so this is intentionally called once at import time
        rather than on every request.
    """
    return pipeline("text2text-generation", model="google/flan-t5-small")


# Global variable to hold the pipeline instance.
# Loaded eagerly at module import so the first HTTP request does not pay
# the model-loading cost.
pipe = initialize_pipeline()
# Define home endpoint.
# Fix: the original function was never registered with the app (no route
# decorator), so GET / returned 404 despite the "home endpoint" comment.
@app.get("/")
def home():
    """Landing / health-check endpoint.

    Returns:
        A static greeting payload, useful to confirm the service is up.
    """
    return {"message": "Hello Siddhant"}
# Define generate endpoint with prompt parameter.
# Fix: the original function had no route decorator, so it was unreachable
# over HTTP despite the "generate endpoint" comment.
@app.get("/generate")
def generate_text(
    # NOTE(review): annotated `str` but defaulted to None — FastAPI treats
    # these as optional; `Optional[str]` would be the canonical annotation,
    # left unchanged here to keep the visible interface identical.
    text: str = Query(None, description="Input text to generate from"),
    prompt: str = Query(None, description="Optional prompt for fine-tuning the generated text"),
):
    """Generate text from ``text`` and/or ``prompt`` via the global pipeline.

    Query params:
        text: base input to generate from (optional).
        prompt: extra prompt text; appended after ``text`` when both are
            given, used alone otherwise (optional).

    Returns:
        ``{"input_text": ..., "output": ...}`` on success, or an
        ``{"error": ...}`` payload when neither parameter is supplied.
    """
    # Guard: at least one of the two inputs is required.
    if not text and not prompt:
        return {"error": "Please provide either 'text' or 'prompt' parameter."}
    # Combine the inputs: "text prompt" when both present, else whichever
    # one was provided.
    if prompt:
        input_text = f"{text} {prompt}" if text else prompt
    else:
        input_text = text
    # do_sample=True + top_k sampling means the output is non-deterministic
    # across calls for the same input.
    output = pipe(input_text, max_length=100, do_sample=True, top_k=50)
    return {"input_text": input_text, "output": output[0]["generated_text"]}