# Mango-Metrics-NLM
# feat: Phi-3.5-MoE multi-agent model repository
# commit: c8b77b5
"""
Gradient Descent and Backpropagation Training System
==================================================
This module provides a comprehensive implementation of gradient descent optimization
algorithms and backpropagation for neural network training, specifically designed
for the MangoMAS multi-agent system.
Key Components:
- Optimizers: SGD, Adam, AdamW with proper mathematical implementations
- Backpropagation: Chain rule-based gradient computation
- Training Loop: Complete training orchestration with monitoring
- Loss Functions: Various loss implementations for different tasks
- Monitoring: Comprehensive gradient and training metrics tracking
Usage:
from src.training.gradient_descent import GradientDescentTrainer
trainer = GradientDescentTrainer()
results = trainer.train_agent(agent_spec)
"""
from .optimizers import SGD, Adam, AdamW, Optimizer
from .backpropagation import BackpropagationEngine
from .training_loop import GradientDescentTrainer
from .loss_functions import CrossEntropyLoss, KLDivergenceLoss, LossFunction
from .monitoring import GradientMonitor, TrainingMonitor
from .model_wrapper import ModelWrapper
from .schedulers import LinearScheduler, CosineScheduler, StepScheduler
# Package metadata.
__version__ = "1.0.0"
__author__ = "MangoMAS Team"

# Public API of this package: exactly the names re-exported above.
__all__ = [
    # Optimizers
    "SGD",
    "Adam",
    "AdamW",
    "Optimizer",
    # Gradient computation and training orchestration
    "BackpropagationEngine",
    "GradientDescentTrainer",
    # Loss functions
    "CrossEntropyLoss",
    "KLDivergenceLoss",
    "LossFunction",
    # Monitoring
    "GradientMonitor",
    "TrainingMonitor",
    # Model wrapping
    "ModelWrapper",
    # Learning-rate schedulers
    "LinearScheduler",
    "CosineScheduler",
    "StepScheduler",
]