kazuhina committed on
Commit
d076e1d
·
1 Parent(s): 058a676

Fix model loading error: Add proper fallback handling for llava_model

Browse files
Files changed (1) hide show
  1. joycaption_app.py +12 -8
joycaption_app.py CHANGED
@@ -16,17 +16,16 @@ from pathlib import Path
16
 
17
  # Initialize the JoyCaption model
18
  print("Loading JoyCaption model...")
 
 
19
  try:
20
- # Model configuration for optimal performance
21
- model_name = "fancyfeast/llama-joycaption-alpha-two-hf-llava"
22
-
23
  # Load processor and model with correct configuration
24
  processor = AutoProcessor.from_pretrained(model_name)
25
 
26
  # Load model with bfloat16 (native dtype of Llama 3.1)
27
  llava_model = LlavaForConditionalGeneration.from_pretrained(
28
- model_name,
29
- torch_dtype="bfloat16",
30
  device_map="auto" if torch.cuda.is_available() else None
31
  )
32
  llava_model.eval()
@@ -35,9 +34,10 @@ try:
35
 
36
  except Exception as e:
37
  print(f"Error loading model: {e}")
38
- # Create a fallback function for when model loading fails
39
- def process_image_with_caption(*args, **kwargs):
40
- return "Error: Model not loaded. Please check the model availability."
 
41
 
42
  @spaces.GPU
43
  def generate_image_caption(image_file, prompt_type="formal_detailed", custom_prompt=""):
@@ -52,6 +52,10 @@ def generate_image_caption(image_file, prompt_type="formal_detailed", custom_pro
52
  Returns:
53
  str: Generated image caption
54
  """
 
 
 
 
55
  try:
56
  if not image_file:
57
  return "Please upload an image file."
 
16
 
17
  # Initialize the JoyCaption model
18
  print("Loading JoyCaption model...")
19
+ model_name = "fancyfeast/llama-joycaption-alpha-two-hf-llava"
20
+
21
  try:
 
 
 
22
  # Load processor and model with correct configuration
23
  processor = AutoProcessor.from_pretrained(model_name)
24
 
25
  # Load model with bfloat16 (native dtype of Llama 3.1)
26
  llava_model = LlavaForConditionalGeneration.from_pretrained(
27
+ model_name,
28
+ torch_dtype="bfloat16",
29
  device_map="auto" if torch.cuda.is_available() else None
30
  )
31
  llava_model.eval()
 
34
 
35
  except Exception as e:
36
  print(f"Error loading model: {e}")
37
+ # Create fallback objects when model loading fails
38
+ processor = None
39
+ llava_model = None
40
+ print("Using fallback mode - model not available")
41
 
42
  @spaces.GPU
43
  def generate_image_caption(image_file, prompt_type="formal_detailed", custom_prompt=""):
 
52
  Returns:
53
  str: Generated image caption
54
  """
55
+ # Check if model is available
56
+ if llava_model is None or processor is None:
57
+ return "Error: JoyCaption model not loaded. Please check the model availability and try again."
58
+
59
  try:
60
  if not image_file:
61
  return "Please upload an image file."