ashish-001 committed
Commit 1ea1964 · verified · 1 Parent(s): da409b3

Upload 3 files

Files changed (3)
  1. Dockerfile +16 -0
  2. app.py +47 -0
  3. requirements.txt +6 -0
Dockerfile ADDED
@@ -0,0 +1,16 @@
+ FROM python:3.10-slim
+
+ WORKDIR /app
+
+ # Copy the app source, then install the pinned deps plus uvicorn to serve it
+ COPY . /app
+
+ RUN pip install --no-cache-dir -r requirements.txt uvicorn
+
+ # Redirect Hugging Face / Torch caches to a world-writable directory
+ # (Spaces containers run as a non-root user)
+ ENV HF_HOME=/home/user/cache
+ ENV TORCH_HOME=/home/user/cache
+ RUN mkdir -p /home/user/cache && chmod -R 777 /home/user/cache
+
+ EXPOSE 7860
+
+ CMD ["uvicorn","app:app","--host","0.0.0.0","--port","7860"]
app.py ADDED
@@ -0,0 +1,47 @@
+ from fastapi import FastAPI, File, UploadFile
+ import torch
+ from transformers import CLIPProcessor, CLIPModel
+ from dotenv import load_dotenv
+ import logging
+ import os
+ from PIL import Image
+
+ # Pull the Hugging Face token (and any other vars) from a local .env if present
+ load_dotenv()
+
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ app = FastAPI(title="Image Embedding API",
+               description="Returns CLIP image embeddings via POST")
+
+ HF_TOKEN = os.getenv('hf_token')
+
+ # Load the model once at startup so each request only pays inference cost
+ logger.info("Loading CLIP processor and model...")
+ try:
+     processor = CLIPProcessor.from_pretrained(
+         "openai/clip-vit-large-patch14", token=HF_TOKEN)
+     clip_model = CLIPModel.from_pretrained(
+         "openai/clip-vit-large-patch14", token=HF_TOKEN)
+     clip_model.eval()  # inference mode (disables dropout etc.)
+     logger.info("CLIP model loaded successfully")
+ except Exception as e:
+     logger.error(f"Failed to load CLIP model: {e}")
+     raise
+
+
+ @app.get("/")
+ async def root():
+     logger.info("Root endpoint accessed")
+     return {"message": "Welcome to the Image Embedding API."}
+
+
+ @app.post("/clip/process")
+ async def process_image(file: UploadFile = File(...)):
+     logger.info("Processing image")
+     image = Image.open(file.file).convert("RGB")
+     inputs = processor(images=image, return_tensors="pt")
+     with torch.no_grad():  # no gradients needed at inference
+         embeddings = clip_model.get_image_features(**inputs)
+     return {"embedding": embeddings.tolist()}
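
A minimal client sketch for the endpoint above, assuming the container is running locally on port 7860 (per the Dockerfile's CMD); the filename, URL, and the use of the requests library are illustrative and not part of the commit:

# Hypothetical client for POST /clip/process.
import requests

with open("cat.jpg", "rb") as f:  # "cat.jpg" is a placeholder image path
    resp = requests.post(
        "http://localhost:7860/clip/process",
        files={"file": ("cat.jpg", f, "image/jpeg")},
    )
resp.raise_for_status()
embedding = resp.json()["embedding"][0]
print(len(embedding))  # 768 for clip-vit-large-patch14's projection head

Note that get_image_features returns un-normalized features, so callers computing cosine similarity typically L2-normalize the vector first.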
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ transformers==4.49.0
+ fastapi==0.115.11
+ pydantic==2.10.6
+ torch==2.6.0
+ pillow==11.1.0
+ python-dotenv==1.0.1
+ python-multipart  # required at runtime by FastAPI's File(...)/UploadFile form parsing
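
Outside Docker, the same environment can be reproduced with `pip install -r requirements.txt uvicorn` and the app served with `uvicorn app:app --host 0.0.0.0 --port 7860`, mirroring the Dockerfile's install and CMD lines.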