Fix base model to Qwen2.5-0.5B and clean LoRA adapter config compatibility

Files changed:
- backend/config.py: +2 -2
- backend/models/character_manager.py: +45 -37
backend/config.py

@@ -18,8 +18,8 @@ class Settings(BaseSettings):
     API_PORT: int = int(os.getenv("API_PORT", "8000"))
     DEBUG: bool = os.getenv("DEBUG", "True").lower() == "true"
 
-    # Model Configuration -
-    BASE_MODEL: str = os.getenv("BASE_MODEL", "Qwen/Qwen2.5-
+    # Model Configuration - Match your local Qwen3 model
+    BASE_MODEL: str = os.getenv("BASE_MODEL", "Qwen/Qwen2.5-0.5B-Instruct")
     DEVICE: str = os.getenv("DEVICE", "cpu")  # Default to CPU for Spaces
     MAX_LENGTH: int = int(os.getenv("MAX_LENGTH", "2048"))
     TEMPERATURE: float = float(os.getenv("TEMPERATURE", "0.7"))
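For orientation, here is a minimal, hypothetical sketch of how the settings above would typically be consumed when loading the base model on CPU. It assumes backend/config.py exposes a module-level `settings` instance (the manager code below references `settings.BASE_MODEL`); the actual loading code in this Space may differ.

# Hypothetical usage sketch, not the Space's actual loading code.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

from backend.config import settings  # assumed module-level export

tokenizer = AutoTokenizer.from_pretrained(settings.BASE_MODEL)
base_model = AutoModelForCausalLM.from_pretrained(
    settings.BASE_MODEL,        # "Qwen/Qwen2.5-0.5B-Instruct" by default
    torch_dtype=torch.float32,  # CPU-friendly dtype, matching the adapter loading below
    low_cpu_mem_usage=True,
)
base_model.to(settings.DEVICE)  # "cpu" by default for Spaces
base_model.eval()

Pinning the default to the 0.5B instruct variant keeps the model small enough to run in float32 on a CPU-only Space.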
backend/models/character_manager.py

@@ -167,52 +167,60 @@ class CharacterManager:
 
             # Try loading with compatibility fixes
             try:
-                # First
-
+                # First: Fix the adapter config to remove incompatible parameters
+                import json
+                config_file = os.path.join(adapter_path, "adapter_config.json")
+
+                with open(config_file, 'r') as f:
+                    config_data = json.load(f)
+
+                # Remove problematic parameters that cause LoraConfig errors
+                problematic_params = [
+                    'alora_invocation_tokens', 'arrow_config',
+                    'ensure_weight_tying', 'peft_version'
+                ]
+
+                for param in problematic_params:
+                    if param in config_data:
+                        logger.info(f"Removing incompatible parameter: {param}")
+                        del config_data[param]
+
+                # Write cleaned config to temp file
+                import tempfile
+                temp_dir = tempfile.mkdtemp()
+                temp_config_file = os.path.join(temp_dir, "adapter_config.json")
+
+                with open(temp_config_file, 'w') as f:
+                    json.dump(config_data, f, indent=2)
+
+                # Copy adapter model to temp directory
+                import shutil
+                temp_model_file = os.path.join(temp_dir, "adapter_model.safetensors")
+                shutil.copy2(os.path.join(adapter_path, "adapter_model.safetensors"), temp_model_file)
+
+                # Load with cleaned config
+                logger.info(f"Loading LoRA adapter with cleaned config for {character_id}")
                 model_with_adapter = PeftModel.from_pretrained(
                     self.base_model,
-
+                    temp_dir,
                     adapter_name=character_id,
                     is_trainable=False,
-                    torch_dtype=torch.float32,
+                    torch_dtype=torch.float32,
                 )
+
                 self.character_models[character_id] = model_with_adapter
-                logger.info(f"✅ Successfully loaded LoRA adapter for {character_id}
+                logger.info(f"✅ Successfully loaded LoRA adapter for {character_id} with cleaned config")
+
+                # Cleanup temp files
+                shutil.rmtree(temp_dir, ignore_errors=True)
 
             except Exception as e1:
-                logger.warning(f"
+                logger.warning(f"LoRA loading failed for {character_id}: {e1}")
 
-                #
-
-
-
-                    settings.BASE_MODEL,
-                    torch_dtype=torch.float32,  # Force float32 for compatibility
-                    device_map=None,  # No device mapping for compatibility
-                    trust_remote_code=True,
-                    low_cpu_mem_usage=True,
-                    use_cache=False  # Disable cache for compatibility
-                )
-
-                # Load adapter with strict=False for compatibility
-                model_with_adapter = PeftModel.from_pretrained(
-                    character_base_model,
-                    adapter_path,
-                    adapter_name=character_id,
-                    is_trainable=False,
-                    torch_dtype=torch.float32,
-                )
-
-                self.character_models[character_id] = model_with_adapter
-                logger.info(f"✅ Successfully loaded LoRA adapter for {character_id} (separate model)")
-
-            except Exception as e2:
-                logger.warning(f"Separate model approach failed for {character_id}: {e2}")
-
-                # Final fallback: Use base model only with enhanced character prompts
-                logger.info(f"Using base model fallback for {character_id}")
-                self.character_models[character_id] = self.base_model
-                logger.info(f"⚠️ Using base model fallback for {character_id} - character behavior will rely on prompts only")
+                # Ultimate fallback: Use base model only with enhanced character prompts
+                logger.info(f"Using base model fallback for {character_id}")
+                self.character_models[character_id] = self.base_model
+                logger.info(f"⚠️ Using base model fallback for {character_id} - character behavior will rely on prompts only")
 
         except Exception as e:
             logger.error(f"❌ Complete failure loading LoRA adapter for {character_id}: {e}")
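The clean-then-load pattern in the diff above can also be factored into a standalone helper. The sketch below restates the same technique under stated assumptions: the function name is hypothetical, and the list of keys to strip simply mirrors the diff rather than being definitive.

# Illustrative helper only; mirrors the approach in the diff above.
import json
import os
import shutil
import tempfile

from peft import PeftModel

# Keys present in this adapter's config that the installed LoraConfig may not
# accept; the list mirrors the diff and may need adjusting for other adapters.
PROBLEM_KEYS = ("alora_invocation_tokens", "arrow_config",
                "ensure_weight_tying", "peft_version")

def load_adapter_with_cleaned_config(base_model, adapter_path, adapter_name):
    """Copy the adapter into a temp dir with a cleaned config, then load it."""
    temp_dir = tempfile.mkdtemp()
    try:
        with open(os.path.join(adapter_path, "adapter_config.json")) as f:
            config_data = json.load(f)
        for key in PROBLEM_KEYS:
            config_data.pop(key, None)  # drop keys the installed peft cannot parse
        with open(os.path.join(temp_dir, "adapter_config.json"), "w") as f:
            json.dump(config_data, f, indent=2)

        # Reuse the original adapter weights unchanged.
        shutil.copy2(os.path.join(adapter_path, "adapter_model.safetensors"),
                     os.path.join(temp_dir, "adapter_model.safetensors"))

        return PeftModel.from_pretrained(
            base_model,
            temp_dir,
            adapter_name=adapter_name,
            is_trainable=False,
        )
    finally:
        # As in the diff, the temp copy can go once from_pretrained has returned.
        shutil.rmtree(temp_dir, ignore_errors=True)

A call such as load_adapter_with_cleaned_config(self.base_model, adapter_path, character_id) would then slot in where the diff calls PeftModel.from_pretrained directly, keeping the temp-file bookkeeping out of the manager's try/except chain.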