manhteky123 committed on
Commit
c802cc8
·
verified ·
1 Parent(s): 0aea1cb

Upload 25 files

Browse files
Files changed (2) hide show
  1. app.py +87 -24
  2. start.sh +16 -1
app.py CHANGED
@@ -2,39 +2,73 @@ import os
2
  import io
3
  import sys
4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  # Set cache directories before importing any ML libraries
6
  os.environ['TRANSFORMERS_CACHE'] = os.environ.get('TRANSFORMERS_CACHE', '/app/.cache/transformers')
7
  os.environ['HF_HOME'] = os.environ.get('HF_HOME', '/app/.cache/huggingface')
8
  os.environ['TORCH_HOME'] = os.environ.get('TORCH_HOME', '/app/.cache/torch')
9
  os.environ['HF_DATASETS_CACHE'] = os.environ.get('HF_DATASETS_CACHE', '/app/.cache/datasets')
 
10
 
11
  # Create cache directories if they don't exist
12
  for cache_dir in ['/app/.cache/transformers', '/app/.cache/huggingface', '/app/.cache/torch', '/app/.cache/datasets']:
13
  os.makedirs(cache_dir, exist_ok=True)
14
 
15
- import torch
16
- from flask import Flask, render_template, request, jsonify, url_for
17
- from PIL import Image
18
- import base64
19
- import logging
20
 
21
- # Safe import with error handling
22
  try:
 
 
 
 
23
  import numpy as np
 
 
 
 
 
 
 
24
  # Check numpy version compatibility
25
  numpy_version = np.__version__
26
- logging.info(f"NumPy version: {numpy_version}")
27
 
28
  from transformers import AutoTokenizer
 
 
 
 
 
 
29
  from blip2_vicuna_instruct import Blip2VicunaInstruct
 
 
30
  MODEL_AVAILABLE = True
31
- logging.info("All imports successful")
32
  except ImportError as e:
33
- logging.error(f"Model import failed: {e}")
 
34
  MODEL_AVAILABLE = False
35
  Blip2VicunaInstruct = None
36
  except Exception as e:
37
- logging.error(f"Unexpected error during import: {e}")
 
38
  MODEL_AVAILABLE = False
39
  Blip2VicunaInstruct = None
40
 
@@ -50,12 +84,23 @@ def load_model():
50
  global model, device
51
 
52
  if not MODEL_AVAILABLE:
53
- logging.error("Model is not available due to import errors")
54
  return
55
 
56
  try:
57
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
58
- logging.info(f"Using device: {device}")
 
 
 
 
 
 
 
 
 
 
 
59
 
60
  # Cấu hình model - có thể cần điều chỉnh theo config thực tế
61
  model_config = {
@@ -74,15 +119,17 @@ def load_model():
74
  "qformer_text_input": True,
75
  }
76
 
 
77
  # Khởi tạo model
78
  model = Blip2VicunaInstruct(**model_config)
79
  model.to(device)
80
  model.eval()
81
 
82
- logging.info("Model loaded successfully!")
83
 
84
  except Exception as e:
85
- logging.error(f"Error loading model: {str(e)}")
 
86
  model = None
87
 
88
  def preprocess_image(image):
@@ -107,7 +154,7 @@ def preprocess_image(image):
107
  return image_tensor
108
 
109
  except Exception as e:
110
- logging.error(f"Error preprocessing image: {str(e)}")
111
  return None
112
 
113
  def predict_emotion(image_tensor, prompt="What emotion is shown in this image?"):
@@ -139,7 +186,7 @@ def predict_emotion(image_tensor, prompt="What emotion is shown in this image?")
139
  return result[0] if result else "Unable to predict emotion"
140
 
141
  except Exception as e:
142
- logging.error(f"Error predicting emotion: {str(e)}")
143
  return f"Error: {str(e)}"
144
 
145
  @app.route('/')
@@ -152,7 +199,10 @@ def predict():
152
  """Handle image upload and prediction"""
153
  try:
154
  if not MODEL_AVAILABLE:
155
- return jsonify({'error': 'Model is not available due to import errors. Please check dependencies.'}), 500
 
 
 
156
 
157
  if 'image' not in request.files:
158
  return jsonify({'error': 'No image file provided'}), 400
@@ -161,6 +211,8 @@ def predict():
161
  if file.filename == '':
162
  return jsonify({'error': 'No image selected'}), 400
163
 
 
 
164
  # Đọc và xử lý image
165
  image = Image.open(io.BytesIO(file.read()))
166
 
@@ -174,8 +226,10 @@ def predict():
174
 
175
  # If model is not loaded, return a fallback response
176
  if model is None:
177
- emotion_result = "Model not loaded - unable to analyze emotion. This might be due to missing model weights or configuration issues."
 
178
  else:
 
179
  # Preprocess image
180
  image_tensor = preprocess_image(image)
181
  if image_tensor is None:
@@ -183,16 +237,19 @@ def predict():
183
 
184
  # Predict emotion
185
  emotion_result = predict_emotion(image_tensor, custom_prompt)
 
186
 
187
  return jsonify({
188
  'success': True,
189
  'emotion': emotion_result,
190
  'image': img_str,
191
- 'prompt': custom_prompt
 
 
192
  })
193
 
194
  except Exception as e:
195
- logging.error(f"Error in prediction: {str(e)}")
196
  return jsonify({'error': f'Prediction failed: {str(e)}'}), 500
197
 
198
  @app.route('/health')
@@ -206,15 +263,21 @@ def health():
206
  })
207
 
208
  if __name__ == '__main__':
209
- # Setup logging
210
- logging.basicConfig(level=logging.INFO)
211
 
212
  # Load model
213
- logging.info("Loading model...")
214
  load_model()
215
 
 
 
 
 
 
216
  # Determine port for Hugging Face Spaces
217
  port = int(os.environ.get("PORT", 7860))
 
218
 
219
- # Run app
220
  app.run(host="0.0.0.0", port=port, debug=False)
 
2
  import io
3
  import sys
4
 
5
+ # Set up logging early
6
+ import logging
7
+ logging.basicConfig(
8
+ level=logging.INFO,
9
+ format='%(asctime)s - %(levelname)s - %(message)s',
10
+ handlers=[
11
+ logging.StreamHandler(sys.stdout)
12
+ ]
13
+ )
14
+ logger = logging.getLogger(__name__)
15
+
16
+ # Add current directory to Python path
17
+ sys.path.insert(0, '/app/EmoVIT')
18
+ sys.path.insert(0, '/app/EmoVIT/lib')
19
+
20
  # Set cache directories before importing any ML libraries
21
  os.environ['TRANSFORMERS_CACHE'] = os.environ.get('TRANSFORMERS_CACHE', '/app/.cache/transformers')
22
  os.environ['HF_HOME'] = os.environ.get('HF_HOME', '/app/.cache/huggingface')
23
  os.environ['TORCH_HOME'] = os.environ.get('TORCH_HOME', '/app/.cache/torch')
24
  os.environ['HF_DATASETS_CACHE'] = os.environ.get('HF_DATASETS_CACHE', '/app/.cache/datasets')
25
+ os.environ['PYTHONUNBUFFERED'] = '1'
26
 
27
  # Create cache directories if they don't exist
28
  for cache_dir in ['/app/.cache/transformers', '/app/.cache/huggingface', '/app/.cache/torch', '/app/.cache/datasets']:
29
  os.makedirs(cache_dir, exist_ok=True)
30
 
31
+ logger.info("🔧 Environment setup complete")
32
+ logger.info(f"PYTHONPATH: {sys.path[:3]}")
 
 
 
33
 
34
+ # Import basic dependencies
35
  try:
36
+ import torch
37
+ from flask import Flask, render_template, request, jsonify, url_for
38
+ from PIL import Image
39
+ import base64
40
  import numpy as np
41
+ logger.info("✅ Basic dependencies loaded successfully")
42
+ except ImportError as e:
43
+ logger.error(f"❌ Failed to import basic dependencies: {e}")
44
+ sys.exit(1)
45
+
46
+ # Safe import with error handling for LAVIS
47
+ try:
48
  # Check numpy version compatibility
49
  numpy_version = np.__version__
50
+ logger.info(f"NumPy version: {numpy_version}")
51
 
52
  from transformers import AutoTokenizer
53
+ logger.info("✅ Transformers imported successfully")
54
+
55
+ # Try to import LAVIS components
56
+ import lavis
57
+ logger.info("✅ LAVIS base imported successfully")
58
+
59
  from blip2_vicuna_instruct import Blip2VicunaInstruct
60
+ logger.info("✅ Blip2VicunaInstruct imported successfully")
61
+
62
  MODEL_AVAILABLE = True
63
+ logger.info("All imports successful - Full model mode enabled")
64
  except ImportError as e:
65
+ logger.error(f"Model import failed: {e}")
66
+ logger.info("🔄 Running in demo mode without full model capabilities")
67
  MODEL_AVAILABLE = False
68
  Blip2VicunaInstruct = None
69
  except Exception as e:
70
+ logger.error(f"Unexpected error during import: {e}")
71
+ logger.info("🔄 Running in demo mode without full model capabilities")
72
  MODEL_AVAILABLE = False
73
  Blip2VicunaInstruct = None
74
 
 
84
  global model, device
85
 
86
  if not MODEL_AVAILABLE:
87
+ logger.warning("⚠️ Model is not available due to import errors")
88
  return
89
 
90
  try:
91
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
92
+ logger.info(f"🔧 Using device: {device}")
93
+
94
+ # Check if we have CUDA support
95
+ if torch.cuda.is_available():
96
+ logger.info(f"🎮 CUDA available: {torch.cuda.get_device_name(0)}")
97
+ else:
98
+ logger.info("🖥️ Running on CPU")
99
+
100
+ # For demo purposes, we'll skip actual model loading if LAVIS isn't available
101
+ if Blip2VicunaInstruct is None:
102
+ logger.warning("⚠️ Blip2VicunaInstruct class not available - skipping model load")
103
+ return
104
 
105
  # Cấu hình model - có thể cần điều chỉnh theo config thực tế
106
  model_config = {
 
119
  "qformer_text_input": True,
120
  }
121
 
122
+ logger.info("🔄 Initializing model...")
123
  # Khởi tạo model
124
  model = Blip2VicunaInstruct(**model_config)
125
  model.to(device)
126
  model.eval()
127
 
128
+ logger.info("Model loaded successfully!")
129
 
130
  except Exception as e:
131
+ logger.error(f"Error loading model: {str(e)}")
132
+ logger.info("🔄 Continuing in demo mode...")
133
  model = None
134
 
135
  def preprocess_image(image):
 
154
  return image_tensor
155
 
156
  except Exception as e:
157
+ logger.error(f"Error preprocessing image: {str(e)}")
158
  return None
159
 
160
  def predict_emotion(image_tensor, prompt="What emotion is shown in this image?"):
 
186
  return result[0] if result else "Unable to predict emotion"
187
 
188
  except Exception as e:
189
+ logger.error(f"Error predicting emotion: {str(e)}")
190
  return f"Error: {str(e)}"
191
 
192
  @app.route('/')
 
199
  """Handle image upload and prediction"""
200
  try:
201
  if not MODEL_AVAILABLE:
202
+ return jsonify({
203
+ 'error': 'Model is not available due to import errors. Please check dependencies.',
204
+ 'details': 'The application is running in demo mode. Full model functionality requires proper LAVIS installation.'
205
+ }), 500
206
 
207
  if 'image' not in request.files:
208
  return jsonify({'error': 'No image file provided'}), 400
 
211
  if file.filename == '':
212
  return jsonify({'error': 'No image selected'}), 400
213
 
214
+ logger.info(f"📷 Processing image: {file.filename}")
215
+
216
  # Đọc và xử lý image
217
  image = Image.open(io.BytesIO(file.read()))
218
 
 
226
 
227
  # If model is not loaded, return a fallback response
228
  if model is None:
229
+ emotion_result = "Model not loaded - unable to analyze emotion. This might be due to missing model weights or configuration issues. Running in demo mode."
230
+ logger.warning("⚠️ Model not available, returning demo response")
231
  else:
232
+ logger.info("🔄 Running model inference...")
233
  # Preprocess image
234
  image_tensor = preprocess_image(image)
235
  if image_tensor is None:
 
237
 
238
  # Predict emotion
239
  emotion_result = predict_emotion(image_tensor, custom_prompt)
240
+ logger.info(f"✅ Prediction complete: {emotion_result[:50]}...")
241
 
242
  return jsonify({
243
  'success': True,
244
  'emotion': emotion_result,
245
  'image': img_str,
246
+ 'prompt': custom_prompt,
247
+ 'model_available': MODEL_AVAILABLE,
248
+ 'model_loaded': model is not None
249
  })
250
 
251
  except Exception as e:
252
+ logger.error(f"Error in prediction: {str(e)}")
253
  return jsonify({'error': f'Prediction failed: {str(e)}'}), 500
254
 
255
  @app.route('/health')
 
263
  })
264
 
265
  if __name__ == '__main__':
266
+ # Setup logging (already done above, but ensure it's configured)
267
+ logger.info("🚀 Starting EmoVIT Flask application...")
268
 
269
  # Load model
270
+ logger.info("📝 Loading model...")
271
  load_model()
272
 
273
+ if MODEL_AVAILABLE and model is not None:
274
+ logger.info("✅ Model loaded successfully - Full functionality available")
275
+ else:
276
+ logger.warning("⚠️ Model not available - Running in demo mode")
277
+
278
  # Determine port for Hugging Face Spaces
279
  port = int(os.environ.get("PORT", 7860))
280
+ logger.info(f"🌐 Starting server on port {port}")
281
 
282
+ # Run app with proper logging
283
  app.run(host="0.0.0.0", port=port, debug=False)
start.sh CHANGED
@@ -19,12 +19,15 @@ export HF_HOME=${HF_HOME:-/app/.cache/huggingface}
19
  export TORCH_HOME=${TORCH_HOME:-/app/.cache/torch}
20
  export HF_DATASETS_CACHE=${HF_DATASETS_CACHE:-/app/.cache/datasets}
21
  export HUGGINGFACE_HUB_CACHE=${HUGGINGFACE_HUB_CACHE:-/app/.cache/huggingface/hub}
 
 
22
 
23
  echo "✅ Cache directories set up"
24
  echo "🔧 Environment variables:"
25
  echo " TRANSFORMERS_CACHE=$TRANSFORMERS_CACHE"
26
  echo " HF_HOME=$HF_HOME"
27
  echo " TORCH_HOME=$TORCH_HOME"
 
28
 
29
  # Test dependencies
30
  echo "🔍 Testing dependencies..."
@@ -38,25 +41,37 @@ try:
38
  print(f'✅ NumPy {numpy.__version__}')
39
  except Exception as e:
40
  print(f'❌ NumPy: {e}')
 
41
 
42
  try:
43
  import cv2
44
  print(f'✅ OpenCV {cv2.__version__}')
45
  except Exception as e:
46
  print(f'❌ OpenCV: {e}')
 
47
 
48
  try:
49
  import torch
50
  print(f'✅ PyTorch {torch.__version__}')
51
  except Exception as e:
52
  print(f'❌ PyTorch: {e}')
 
53
 
54
  try:
55
  import transformers
56
  print(f'✅ Transformers {transformers.__version__}')
57
  except Exception as e:
58
  print(f'❌ Transformers: {e}')
 
 
 
 
 
 
 
 
59
  "
60
 
61
  echo "🌐 Starting Flask application..."
62
- exec python3 app.py
 
 
19
  export TORCH_HOME=${TORCH_HOME:-/app/.cache/torch}
20
  export HF_DATASETS_CACHE=${HF_DATASETS_CACHE:-/app/.cache/datasets}
21
  export HUGGINGFACE_HUB_CACHE=${HUGGINGFACE_HUB_CACHE:-/app/.cache/huggingface/hub}
22
+ export PYTHONPATH=/app/EmoVIT:/app/EmoVIT/lib:$PYTHONPATH
23
+ export PYTHONUNBUFFERED=1
24
 
25
  echo "✅ Cache directories set up"
26
  echo "🔧 Environment variables:"
27
  echo " TRANSFORMERS_CACHE=$TRANSFORMERS_CACHE"
28
  echo " HF_HOME=$HF_HOME"
29
  echo " TORCH_HOME=$TORCH_HOME"
30
+ echo " PYTHONPATH=$PYTHONPATH"
31
 
32
  # Test dependencies
33
  echo "🔍 Testing dependencies..."
 
41
  print(f'✅ NumPy {numpy.__version__}')
42
  except Exception as e:
43
  print(f'❌ NumPy: {e}')
44
+ sys.exit(1)
45
 
46
  try:
47
  import cv2
48
  print(f'✅ OpenCV {cv2.__version__}')
49
  except Exception as e:
50
  print(f'❌ OpenCV: {e}')
51
+ sys.exit(1)
52
 
53
  try:
54
  import torch
55
  print(f'✅ PyTorch {torch.__version__}')
56
  except Exception as e:
57
  print(f'❌ PyTorch: {e}')
58
+ sys.exit(1)
59
 
60
  try:
61
  import transformers
62
  print(f'✅ Transformers {transformers.__version__}')
63
  except Exception as e:
64
  print(f'❌ Transformers: {e}')
65
+ sys.exit(1)
66
+
67
+ try:
68
+ import lavis
69
+ print(f'✅ LAVIS imported successfully')
70
+ except Exception as e:
71
+ print(f'❌ LAVIS: {e}')
72
+ print('Warning: LAVIS not available, app will run in demo mode')
73
  "
74
 
75
  echo "🌐 Starting Flask application..."
76
+ # Use unbuffered Python output for better logging in containers
77
+ exec python3 -u app.py