# NOTE(review): removed code-viewer export residue (file-size line, commit
# hash, and collapsed line-number gutter) that made the file unparseable.
from fastapi import FastAPI
from pydantic import BaseModel
from typing import Union, List
import uvicorn
import logging
from datetime import datetime
import pytz
import torch
from main import ImageProcessor
# Shared processor instance used by every endpoint in this module.
image_processor = ImageProcessor()

# Configure logging once for the process. basicConfig installs a FileHandler
# on the root logger writing to drinksLog.log (truncated on startup). The
# "drinks" logger propagates to root, so it must NOT get its own FileHandler
# for the same file — the previous extra handler caused every record to be
# written twice (and the two handlers opened the file in conflicting
# 'w'/'a' modes).
logging.basicConfig(filename="drinksLog.log", filemode='w', level=logging.DEBUG)
logger = logging.getLogger("drinks")
logger.setLevel(logging.DEBUG)

app = FastAPI()
class RequestBody(BaseModel):
    # One detection request: a category selector plus the image to analyze.
    cat: str  # processing category; "posm" and "planogram" are handled (see process_image)
    img: str  # image URL, forwarded to ImageProcessor.process_image
class RequestData(BaseModel):
    # Top-level POST payload: either a single request or a batch of them.
    body: Union[RequestBody, List[RequestBody]]
@app.get("/status")
async def status():
    """Health-check endpoint: reports that the AI server is reachable."""
    return dict(status="AI Server is running")
# Function to process the image based on the category
async def process_image(item: "RequestBody"):
    """Dispatch one request item to the image processor.

    Returns the processor's result for a supported category, or an
    error dict for any other category value.
    """
    category = item.cat
    img_url = item.img
    # "posm" and "planogram" previously had byte-identical duplicated
    # branches; they share one processing path, collapsed here into a
    # single membership test.
    if category in ("posm", "planogram"):
        return await image_processor.process_image(img_url)
    return {"error": f"Unsupported category {category}"}
@app.post("/bats")
async def detect_items(request_data: RequestData):
    """Run detection over the POSTed request body.

    Accepts either a single RequestBody or a list of them in
    ``request_data.body`` and always returns a list of per-item results.
    On failure, logs the full traceback and returns a generic error dict
    (no stack details leak to the client).
    """
    try:
        body = request_data.body
        # Normalize the single-item case so both payload shapes share one path.
        items = body if isinstance(body, list) else [body]
        return [await process_image(item) for item in items]
    except Exception:
        # logger.exception records the traceback; the previous
        # logger.error(f"...{e}") kept only the message text.
        logger.exception("Error during detection")
        return {"error": "An error occurred during detection"}
if __name__ == "__main__":
    try:
        # Serve the app locally only (loopback interface, port 4444).
        uvicorn.run(app, host="127.0.0.1", port=4444)
    finally:
        # Release cached GPU memory on shutdown — ImageProcessor presumably
        # holds CUDA allocations; confirm against main.ImageProcessor.
        torch.cuda.empty_cache()