{
  "model_type": "crane-ai",
  "architectures": [
    "CRANEAIModel"
  ],
  "crane_version": "1.0.0",
  "modules": {
    "code_module": {
      "base_model": "deepseek-ai/deepseek-coder-1.3b-instruct",
      "task": "code_generation",
      "max_tokens": 2048,
      "temperature": 0.1
    },
    "chat_module": {
      "base_model": "Qwen/Qwen2.5-1.5B-Instruct",
      "task": "chat",
      "max_tokens": 1024,
      "temperature": 0.7
    },
    "reason_module": {
      "base_model": "microsoft/Phi-3-mini-4k-instruct",
      "task": "reasoning",
      "max_tokens": 1024,
      "temperature": 0.3
    },
    "fast_module": {
      "base_model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
      "task": "quick_response",
      "max_tokens": 512,
      "temperature": 0.8
    }
  },
  "router_config": {
    "confidence_threshold": 0.6,
    "max_concurrent_requests": 4,
    "timeout": 30,
    "fallback_model": "fast_module"
  },
  "training": {
    "supports_fine_tuning": true,
    "fine_tuning_method": "LoRA",
    "training_script": "training/fine_tune.py"
  }
}