shahzeb171 committed on
Commit
3414163
·
1 Parent(s): 8cdaf72

Debug path

Browse files
Files changed (1) hide show
  1. scripts/llm_service.py +5 -1
scripts/llm_service.py CHANGED
@@ -181,6 +181,10 @@ Always be helpful, accurate, and focused on the user's specific needs."""
181
  """Load model asynchronously to avoid blocking the UI"""
182
  def _load():
183
  try:
 
 
 
 
184
  logger.info(f"πŸ”„ Loading Qwen2.5-Coder model from {self.model_path}...")
185
  logger.info(f"βš™οΈ Configuration: n_ctx={self.n_ctx}, n_threads={self.n_threads}, n_gpu_layers={self.n_gpu_layers}")
186
 
@@ -198,7 +202,7 @@ Always be helpful, accurate, and focused on the user's specific needs."""
198
  # cache_dir=Path('models'),
199
  seed=42,
200
  n_ctx=self.n_ctx,
201
- verbose=False,
202
  n_gpu_layers=self.n_gpu_layers,
203
  n_threads=self.n_threads,
204
  )
 
181
  """Load model asynchronously to avoid blocking the UI"""
182
  def _load():
183
  try:
184
+ if os.path.exists(self.model_path):
185
+ logger.info(f"βœ… Found model at {self.model_path}. Loading Qwen2.5-Coder...")
186
+ else:
187
+ logger.error(f"❌ Model not found at {self.model_path}. Please check the path.")
188
  logger.info(f"πŸ”„ Loading Qwen2.5-Coder model from {self.model_path}...")
189
  logger.info(f"βš™οΈ Configuration: n_ctx={self.n_ctx}, n_threads={self.n_threads}, n_gpu_layers={self.n_gpu_layers}")
190
 
 
202
  # cache_dir=Path('models'),
203
  seed=42,
204
  n_ctx=self.n_ctx,
205
+ # verbose=False,
206
  n_gpu_layers=self.n_gpu_layers,
207
  n_threads=self.n_threads,
208
  )