ButterM40 committed on
Commit
7304d60
·
1 Parent(s): 373cd0c

Fix adapter interference: create dedicated model instances per character

Browse files
Files changed (1) hide show
  1. backend/models/character_manager.py +10 -2
backend/models/character_manager.py CHANGED
@@ -259,8 +259,16 @@ class CharacterManager:
259
  with open(temp_config_file, 'w') as f:
260
  json.dump(config_data, f, indent=2)
261
 
 
 
 
 
 
 
 
 
262
  model_with_adapter = PeftModel.from_pretrained(
263
- self.base_model,
264
  temp_dir,
265
  adapter_name=character_id,
266
  is_trainable=False,
@@ -268,7 +276,7 @@ class CharacterManager:
268
  )
269
 
270
  self.character_models[character_id] = model_with_adapter
271
- logger.info(f"✅ Successfully loaded LoRA adapter for {character_id} with cleaned config")
272
 
273
  # Cleanup temp files
274
  shutil.rmtree(temp_dir, ignore_errors=True)
 
259
  with open(temp_config_file, 'w') as f:
260
  json.dump(config_data, f, indent=2)
261
 
262
+ # Create a fresh model instance for this character to avoid adapter conflicts
263
+ character_model = AutoModelForCausalLM.from_pretrained(
264
+ self.base_model.name_or_path if hasattr(self.base_model, 'name_or_path') else settings.BASE_MODEL,
265
+ torch_dtype=torch.float32,
266
+ trust_remote_code=True,
267
+ device_map="cpu" # Keep on CPU to avoid memory issues
268
+ )
269
+
270
  model_with_adapter = PeftModel.from_pretrained(
271
+ character_model,
272
  temp_dir,
273
  adapter_name=character_id,
274
  is_trainable=False,
 
276
  )
277
 
278
  self.character_models[character_id] = model_with_adapter
279
+ logger.info(f"✅ Successfully loaded LoRA adapter for {character_id} with dedicated model instance")
280
 
281
  # Cleanup temp files
282
  shutil.rmtree(temp_dir, ignore_errors=True)