khushalcodiste committed on
Commit
d4a7af6
·
1 Parent(s): 295de8e

fix: added

Browse files
Files changed (2) hide show
  1. Dockerfile +0 -1
  2. app/main.py +10 -3
Dockerfile CHANGED
@@ -4,7 +4,6 @@ ENV PYTHONDONTWRITEBYTECODE=1 \
4
  PYTHONUNBUFFERED=1 \
5
  PIP_NO_CACHE_DIR=1 \
6
  HF_HOME=/data/.huggingface \
7
- TRANSFORMERS_CACHE=/data/.huggingface/hub \
8
  PORT=7860
9
 
10
  WORKDIR /code
 
4
  PYTHONUNBUFFERED=1 \
5
  PIP_NO_CACHE_DIR=1 \
6
  HF_HOME=/data/.huggingface \
 
7
  PORT=7860
8
 
9
  WORKDIR /code
app/main.py CHANGED
@@ -6,12 +6,17 @@ from contextlib import asynccontextmanager
6
  from fastapi import FastAPI, File, Form, HTTPException, UploadFile
7
  from fastapi.responses import JSONResponse
8
  from PIL import Image, UnidentifiedImageError
9
- from transformers import BlipForConditionalGeneration, BlipProcessor
10
 
11
  Image.MAX_IMAGE_PIXELS = None
12
 
13
  MODEL_ID = os.getenv("MODEL_ID", "Salesforce/blip-image-captioning-large")
14
- USE_FAST_PROCESSOR = True
 
 
 
 
 
15
  MAX_IMAGE_SIZE = (1024, 1024)
16
 
17
  processor: BlipProcessor | None = None
@@ -21,7 +26,9 @@ model: BlipForConditionalGeneration | None = None
21
  def load_model() -> None:
22
  global processor, model
23
  if processor is None:
24
- processor = BlipProcessor.from_pretrained(MODEL_ID, use_fast=USE_FAST_PROCESSOR)
 
 
25
  if model is None:
26
  model = BlipForConditionalGeneration.from_pretrained(MODEL_ID)
27
 
 
6
  from fastapi import FastAPI, File, Form, HTTPException, UploadFile
7
  from fastapi.responses import JSONResponse
8
  from PIL import Image, UnidentifiedImageError
9
+ from transformers import AutoTokenizer, BlipForConditionalGeneration, BlipImageProcessor, BlipProcessor
10
 
11
  Image.MAX_IMAGE_PIXELS = None
12
 
13
  MODEL_ID = os.getenv("MODEL_ID", "Salesforce/blip-image-captioning-large")
14
# Env toggle for the fast tokenizer: values "1"/"true"/"yes"/"on"
# (case-insensitive, whitespace-trimmed) enable it; anything else disables.
USE_FAST_PROCESSOR = os.getenv("USE_FAST_PROCESSOR", "true").strip().lower() in ("1", "true", "yes", "on")
20
  MAX_IMAGE_SIZE = (1024, 1024)
21
 
22
  processor: BlipProcessor | None = None
 
def load_model() -> None:
    """Lazily initialise the module-level BLIP processor and caption model.

    The processor is assembled from its two parts (image processor +
    tokenizer) so that the tokenizer's ``use_fast`` behaviour can be
    driven by ``USE_FAST_PROCESSOR``. Idempotent: components that are
    already loaded are left untouched.
    """
    global processor, model
    if processor is None:
        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=USE_FAST_PROCESSOR)
        image_proc = BlipImageProcessor.from_pretrained(MODEL_ID)
        processor = BlipProcessor(image_processor=image_proc, tokenizer=tokenizer)
    if model is None:
        model = BlipForConditionalGeneration.from_pretrained(MODEL_ID)