llm_server / main.py
Paridhim's picture
Upload main.py
8dc4dc1 verified
raw
history blame
556 Bytes
import uvicorn
from fastapi import FastAPI, HTTPException, Request

from wrapper import LLMWrapper
# Application and model wrapper are created once at import time, so the
# (presumably expensive) model load in LLMWrapper happens at startup rather
# than per request — TODO confirm against wrapper.py.
app = FastAPI()
llm_wrapper = LLMWrapper()
@app.post("/")
async def generate_text(request: Request):
    """Generate text from a raw UTF-8 prompt sent as the POST body.

    The request body is treated as plain text (not JSON): the raw bytes are
    decoded as UTF-8 and passed verbatim to the model wrapper.

    Returns:
        ``{'generated_text': <str>}`` on success.

    Raises:
        HTTPException: 400 when the body is empty or whitespace-only.
    """
    raw_data = await request.body()  # raw bytes of the POST body
    prompt = raw_data.decode('utf-8')
    if not prompt.strip():
        # BUG FIX: the original `return {'error': ...}, 400` is a Flask
        # idiom — FastAPI serializes that tuple as a JSON array with HTTP
        # status 200, so clients never saw the error status. Raising
        # HTTPException makes the server actually respond with 400.
        raise HTTPException(status_code=400, detail='Prompt is required')
    generated_text = llm_wrapper.generate_text(prompt)
    return {'generated_text': generated_text}
if __name__ == '__main__':
    # Serve the app directly when this module is executed as a script;
    # bind only to the loopback interface on port 8001.
    bind_host, bind_port = '127.0.0.1', 8001
    uvicorn.run(app, host=bind_host, port=bind_port)