manhteky123 commited on
Commit
70cd26e
·
verified ·
1 Parent(s): f72c724

Upload 24 files

Browse files
Files changed (4) hide show
  1. Dockerfile +50 -50
  2. app.py +29 -11
  3. requirements.txt +4 -4
  4. test_deps.py +79 -0
Dockerfile CHANGED
@@ -1,51 +1,51 @@
1
- FROM python:3.9-slim
2
-
3
- # Set working directory
4
- WORKDIR /app
5
-
6
- # Install system dependencies
7
- RUN apt-get update && apt-get install -y \
8
- git \
9
- wget \
10
- curl \
11
- build-essential \
12
- libgl1-mesa-dev \
13
- libglib2.0-0 \
14
- libsm6 \
15
- libxext6 \
16
- libxrender-dev \
17
- libgomp1 \
18
- libgcc-s1 \
19
- python3-opencv \
20
- ffmpeg \
21
- libsm6 \
22
- libxext6 \
23
- && rm -rf /var/lib/apt/lists/*
24
-
25
- # Copy requirements first to leverage Docker cache
26
- COPY requirements.txt .
27
-
28
- # Install Python dependencies
29
- RUN pip install --no-cache-dir --upgrade pip && \
30
- pip install --no-cache-dir -r requirements.txt
31
-
32
- # Copy application code
33
- COPY . .
34
-
35
- # Create necessary directories
36
- RUN mkdir -p static/css templates
37
-
38
- # Set environment variables for Hugging Face Spaces
39
- ENV PYTHONPATH=/app
40
- ENV FLASK_APP=app.py
41
- ENV FLASK_ENV=production
42
-
43
- # Expose port for Hugging Face Spaces
44
- EXPOSE 7860
45
-
46
- # Health check
47
- HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
48
- CMD curl -f http://localhost:7860/health || exit 1
49
-
50
- # Command to run the application
51
  CMD ["python", "app.py"]
 
1
+ FROM python:3.9-slim
2
+
3
+ # Set working directory
4
+ WORKDIR /app
5
+
6
+ # Install system dependencies
7
+ RUN apt-get update && apt-get install -y \
8
+ git \
9
+ wget \
10
+ curl \
11
+ build-essential \
12
+ libgl1-mesa-dev \
13
+ libglib2.0-0 \
14
+ libsm6 \
15
+ libxext6 \
16
+ libxrender-dev \
17
+ libgomp1 \
18
+ libgcc-s1 \
19
+ ffmpeg \
20
+ libgtk-3-dev \
21
+ && rm -rf /var/lib/apt/lists/*
22
+
23
+ # Copy requirements first to leverage Docker cache
24
+ COPY requirements.txt .
25
+
26
+ # Install Python dependencies with specific order
27
+ RUN pip install --no-cache-dir --upgrade pip && \
28
+ pip install --no-cache-dir numpy==1.21.6 && \
29
+ pip install --no-cache-dir opencv-python-headless==4.8.1.78 && \
30
+ pip install --no-cache-dir -r requirements.txt
31
+
32
+ # Copy application code
33
+ COPY . .
34
+
35
+ # Create necessary directories
36
+ RUN mkdir -p static/css templates
37
+
38
+ # Set environment variables for Hugging Face Spaces
39
+ ENV PYTHONPATH=/app
40
+ ENV FLASK_APP=app.py
41
+ ENV FLASK_ENV=production
42
+
43
+ # Expose port for Hugging Face Spaces
44
+ EXPOSE 7860
45
+
46
+ # Health check
47
+ HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
48
+ CMD curl -f http://localhost:7860/health || exit 1
49
+
50
+ # Command to run the application
51
  CMD ["python", "app.py"]
app.py CHANGED
@@ -4,11 +4,17 @@ import torch
4
  from flask import Flask, render_template, request, jsonify, url_for
5
  from PIL import Image
6
  import base64
7
- from transformers import AutoTokenizer
8
  import logging
9
 
10
- # Import model từ file hiện tại
11
- from blip2_vicuna_instruct import Blip2VicunaInstruct
 
 
 
 
 
 
 
12
 
13
  app = Flask(__name__)
14
  app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16MB max file size
@@ -21,6 +27,10 @@ def load_model():
21
  """Load BLIP2 Vicuna model"""
22
  global model, device
23
 
 
 
 
 
24
  try:
25
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
26
  logging.info(f"Using device: {device}")
@@ -119,6 +129,9 @@ def index():
119
  def predict():
120
  """Handle image upload and prediction"""
121
  try:
 
 
 
122
  if 'image' not in request.files:
123
  return jsonify({'error': 'No image file provided'}), 400
124
 
@@ -129,22 +142,26 @@ def predict():
129
  # Đọc và xử lý image
130
  image = Image.open(io.BytesIO(file.read()))
131
 
132
- # Preprocess image
133
- image_tensor = preprocess_image(image)
134
- if image_tensor is None:
135
- return jsonify({'error': 'Failed to process image'}), 400
136
-
137
  # Get custom prompt if provided
138
  custom_prompt = request.form.get('prompt', 'What emotion is shown in this image?')
139
 
140
- # Predict emotion
141
- emotion_result = predict_emotion(image_tensor, custom_prompt)
142
-
143
  # Convert image to base64 for display
144
  buffered = io.BytesIO()
145
  image.save(buffered, format="PNG")
146
  img_str = base64.b64encode(buffered.getvalue()).decode()
147
 
 
 
 
 
 
 
 
 
 
 
 
 
148
  return jsonify({
149
  'success': True,
150
  'emotion': emotion_result,
@@ -161,6 +178,7 @@ def health():
161
  """Health check endpoint"""
162
  return jsonify({
163
  'status': 'healthy',
 
164
  'model_loaded': model is not None,
165
  'device': str(device) if device else 'unknown'
166
  })
 
4
  from flask import Flask, render_template, request, jsonify, url_for
5
  from PIL import Image
6
  import base64
 
7
  import logging
8
 
9
+ # Safe import with error handling
10
+ try:
11
+ from transformers import AutoTokenizer
12
+ from blip2_vicuna_instruct import Blip2VicunaInstruct
13
+ MODEL_AVAILABLE = True
14
+ except ImportError as e:
15
+ logging.error(f"Model import failed: {e}")
16
+ MODEL_AVAILABLE = False
17
+ Blip2VicunaInstruct = None
18
 
19
  app = Flask(__name__)
20
  app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16MB max file size
 
27
  """Load BLIP2 Vicuna model"""
28
  global model, device
29
 
30
+ if not MODEL_AVAILABLE:
31
+ logging.error("Model is not available due to import errors")
32
+ return
33
+
34
  try:
35
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
36
  logging.info(f"Using device: {device}")
 
129
  def predict():
130
  """Handle image upload and prediction"""
131
  try:
132
+ if not MODEL_AVAILABLE:
133
+ return jsonify({'error': 'Model is not available due to import errors. Please check dependencies.'}), 500
134
+
135
  if 'image' not in request.files:
136
  return jsonify({'error': 'No image file provided'}), 400
137
 
 
142
  # Đọc và xử lý image
143
  image = Image.open(io.BytesIO(file.read()))
144
 
 
 
 
 
 
145
  # Get custom prompt if provided
146
  custom_prompt = request.form.get('prompt', 'What emotion is shown in this image?')
147
 
 
 
 
148
  # Convert image to base64 for display
149
  buffered = io.BytesIO()
150
  image.save(buffered, format="PNG")
151
  img_str = base64.b64encode(buffered.getvalue()).decode()
152
 
153
+ # If model is not loaded, return a fallback response
154
+ if model is None:
155
+ emotion_result = "Model not loaded - unable to analyze emotion. This might be due to missing model weights or configuration issues."
156
+ else:
157
+ # Preprocess image
158
+ image_tensor = preprocess_image(image)
159
+ if image_tensor is None:
160
+ return jsonify({'error': 'Failed to process image'}), 400
161
+
162
+ # Predict emotion
163
+ emotion_result = predict_emotion(image_tensor, custom_prompt)
164
+
165
  return jsonify({
166
  'success': True,
167
  'emotion': emotion_result,
 
178
  """Health check endpoint"""
179
  return jsonify({
180
  'status': 'healthy',
181
+ 'model_available': MODEL_AVAILABLE,
182
  'model_loaded': model is not None,
183
  'device': str(device) if device else 'unknown'
184
  })
requirements.txt CHANGED
@@ -2,21 +2,21 @@
2
  Flask==2.3.3
3
  gunicorn==21.2.0
4
 
5
- # Core ML Libraries
6
  torch>=1.10.0
7
  torchvision>=0.11.0
8
  transformers>=4.28.0
9
  pillow>=10.0.0
10
 
11
- # Image Processing
12
- opencv-python-headless>=4.5.0
 
13
 
14
  # LAVIS dependencies (for BLIP2)
15
  salesforce-lavis
16
  omegaconf>=2.3.0
17
 
18
  # Data handling
19
- numpy>=1.24.0
20
  pandas>=2.0.0
21
 
22
  # Other utilities
 
2
  Flask==2.3.3
3
  gunicorn==21.2.0
4
 
5
+ # Core ML Libraries
6
  torch>=1.10.0
7
  torchvision>=0.11.0
8
  transformers>=4.28.0
9
  pillow>=10.0.0
10
 
11
+ # Image Processing (pinned versions for compatibility)
12
+ numpy==1.21.6
13
+ opencv-python-headless==4.8.1.78
14
 
15
  # LAVIS dependencies (for BLIP2)
16
  salesforce-lavis
17
  omegaconf>=2.3.0
18
 
19
  # Data handling
 
20
  pandas>=2.0.0
21
 
22
  # Other utilities
test_deps.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Test script to verify dependencies and model loading
4
+ """
5
+
6
+ import sys
7
+ import importlib
8
+
9
def test_dependencies():
    """Check that every required third-party package can be imported.

    Returns:
        dict mapping import name -> status string: "✅ OK" on success,
        or "❌ FAILED: <reason>" when the import raises ImportError.
    """
    # NOTE: these are *import* names, not PyPI names — 'PIL' is provided
    # by the pillow package and 'cv2' by opencv-python(-headless).
    required_packages = [
        'flask',
        'torch',
        'torchvision',
        'transformers',
        'PIL',
        'numpy',
        'cv2',
    ]

    results = {}
    for package in required_packages:
        try:
            # importlib.import_module handles every name uniformly; the
            # original code had redundant special-case branches for
            # 'PIL' and 'cv2' that performed the exact same call.
            importlib.import_module(package)
            results[package] = "✅ OK"
        except ImportError as e:
            results[package] = f"❌ FAILED: {e}"

    return results
36
+
37
def test_model_import():
    """Report whether the local BLIP2-Vicuna model class is importable.

    Returns a human-readable status string; never raises on ImportError.
    """
    try:
        # Imported only for the side effect of verifying availability.
        from blip2_vicuna_instruct import Blip2VicunaInstruct  # noqa: F401
    except ImportError as e:
        return f"❌ Model import failed: {e}"
    return "✅ Model import successful"
44
+
45
def test_basic_functionality():
    """Verify that a minimal Flask application object can be created.

    Returns a human-readable status string; swallows any exception and
    reports it instead of raising.
    """
    try:
        from flask import Flask
        _ = Flask(__name__)
    except Exception as e:
        return f"❌ Flask app creation failed: {e}"
    return "✅ Flask app creation successful"
53
+
54
if __name__ == "__main__":
    print("🔍 Testing EmoVIT Dependencies...")
    print("=" * 50)

    # Per-package import report.
    print("\n📦 Dependency Check:")
    dep_results = test_dependencies()
    for pkg, state in dep_results.items():
        print(f" {pkg}: {state}")

    # Project model import and basic Flask sanity check.
    print(f"\n🤖 Model Import: {test_model_import()}")
    print(f"\n🌐 Flask Test: {test_basic_functionality()}")

    # Exit non-zero if anything failed so CI/Docker builds can gate on it.
    broken = [pkg for pkg, state in dep_results.items() if "❌" in state]
    if broken:
        print(f"\n❌ Issues found with: {', '.join(broken)}")
        print("Please check the requirements.txt and Dockerfile")
        sys.exit(1)
    else:
        print("\n✅ All basic dependencies OK!")
        print("You can try running: python app.py")