hamxaameer committed on
Commit
eb6ce9a
·
1 Parent(s): a0c2787

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +55 -14
app.py CHANGED
@@ -34,21 +34,51 @@ def load_model_from_pickle(pickle_path="best_model.pkl"):
34
  # Check if file exists
35
  if not os.path.exists(pickle_path):
36
  return f"❌ Model file not found: {pickle_path}\n\nPlease ensure best_model.pkl is uploaded to the HuggingFace Space."
 
 
 
 
 
 
 
 
 
 
 
 
 
37
 
38
- # Load pickle file
39
- with open(pickle_path, 'rb') as f:
40
- model_package = pickle.load(f)
41
-
42
- loaded_model = model_package['model']
43
- loaded_tokenizer = model_package['tokenizer']
44
- loaded_config = model_package['config']
45
-
46
- # Set model to evaluation mode
47
- loaded_model.eval()
 
 
 
 
 
 
 
 
 
 
 
 
48
 
49
- # Move to appropriate device
50
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
51
- loaded_model = loaded_model.to(device)
 
 
 
 
 
52
 
53
  config_info = f"""✅ Model loaded successfully!
54
 
@@ -70,7 +100,18 @@ def load_model_from_pickle(pickle_path="best_model.pkl"):
70
  return config_info
71
 
72
  except Exception as e:
73
- return f"❌ Error loading model: {str(e)}\n\nPlease ensure best_model.pkl is properly uploaded to the Space."
 
 
 
 
 
 
 
 
 
 
 
74
 
75
  def calculate_bleu_score(reference, hypothesis):
76
  """Calculate BLEU score between reference and generated code"""
 
34
  # Check if file exists
35
  if not os.path.exists(pickle_path):
36
  return f"❌ Model file not found: {pickle_path}\n\nPlease ensure best_model.pkl is uploaded to the HuggingFace Space."
37
+ # Try to load using torch.load which supports map_location for CPU-only machines
38
+ try:
39
+ if torch.cuda.is_available():
40
+ model_package = torch.load(pickle_path)
41
+ else:
42
+ model_package = torch.load(pickle_path, map_location=torch.device('cpu'))
43
+ except RuntimeError as rte:
44
+ # Common error when a GPU-saved object is loaded on CPU-only machine
45
+ if 'Attempting to deserialize object on a CUDA device' in str(rte):
46
+ # Retry mapping to CPU
47
+ model_package = torch.load(pickle_path, map_location=torch.device('cpu'))
48
+ else:
49
+ raise
50
 
51
+ # Handle a few common package shapes.
52
+ if isinstance(model_package, dict):
53
+ loaded_model = model_package.get('model', None)
54
+ loaded_tokenizer = model_package.get('tokenizer', None)
55
+ loaded_config = model_package.get('config', {}) or {}
56
+ else:
57
+ # Unknown package format: assume the object itself is the model
58
+ loaded_model = model_package
59
+ loaded_tokenizer = None
60
+ loaded_config = {}
61
+
62
+ # If user saved a state_dict instead of a model object, provide guidance
63
+ if isinstance(loaded_model, dict) and 'state_dict' in loaded_model:
64
+ # the file contains something like {'state_dict': ...}
65
+ return ("❌ The pickle appears to contain a state_dict rather than a full model object. "
66
+ "This app expects a pickled model object (model instance).\n"
67
+ "If you only have a state_dict, re-create the model architecture and load the state_dict before pickling, "
68
+ "or provide a pickled model object saved with torch.save(model, path).")
69
+
70
+ if loaded_model is None:
71
+ return ("❌ No model object found inside the pickle. Please ensure the pickle contains a dict with keys "
72
+ "'model', 'tokenizer', and 'config' (or the model object itself).")
73
 
74
+ # Set model to evaluation mode and move to appropriate device
75
+ try:
76
+ loaded_model.eval()
77
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
78
+ loaded_model = loaded_model.to(device)
79
+ except Exception as e:
80
+ return (f"❌ Error preparing model for inference: {str(e)}\n\n"
81
+ "This can happen if the saved object is not a proper torch.nn.Module or if tensors couldn't be mapped to the current device.")
82
 
83
  config_info = f"""✅ Model loaded successfully!
84
 
 
100
  return config_info
101
 
102
  except Exception as e:
103
+ # Specific hint for CUDA->CPU deserialization issues
104
+ err = str(e)
105
+ if 'Attempting to deserialize object on a CUDA device' in err:
106
+ return ("❌ Error loading model: The file was saved for a GPU device but this runtime has no CUDA available. "
107
+ "The loader attempted to remap tensors to CPU, but loading still failed.\n\n"
108
+ "Try re-saving the model on a CPU by running:\n"
109
+ "```python\n"
110
+ "model.to('cpu')\n"
111
+ "torch.save({'model': model, 'tokenizer': tokenizer, 'config': config}, 'best_model.pkl')\n"
112
+ "```\n\n"
113
+ "Or upload a CPU-compatible `best_model.pkl` to the Space and retry.")
114
+ return f"❌ Error loading model: {err}\n\nPlease ensure best_model.pkl is properly uploaded to the Space and is CPU-compatible."
115
 
116
  def calculate_bleu_score(reference, hypothesis):
117
  """Calculate BLEU score between reference and generated code"""