"""
COGNITIVE-CORE Framework
========================
Universal template for Ame Web Studio's cognitive AI architectures.
Provides standardized loading, checkpoint management, and utilities
for vision, language, world model, and multimodal cognitive systems.
A brief illustrative usage sketch follows this docstring.

Copyright © 2026 Mike Amega (Logo) - Ame Web Studio
License: Proprietary - All Rights Reserved
"""
from .cognitive_base import (
    CognitiveConfig,
    CognitiveModule,
    MemoryModule,
    TemporalModule,
    WorldModelModule,
    CognitivePreTrainedModel,
    register_cognitive_model,
)
from .cognitive_checkpoint import (
    remap_checkpoint_keys,
    validate_checkpoint,
    save_cognitive_checkpoint,
    load_cognitive_checkpoint,
)
from .cognitive_utils import (
    setup_environment,
    get_device,
    get_optimal_dtype,
    get_memory_info,
    clear_memory,
    estimate_model_memory,
    print_model_info,
    print_training_progress,
    get_hf_token,
)
from .cognitive_training import (
    CognitiveTrainingConfig,
    CognitiveTrainer,
    prepare_dataset,
    create_instruction_dataset,
    quick_train,
    CognitiveStateCallback,
)

__version__ = "1.0.0"
__author__ = "Mike Amega"
__license__ = "Proprietary"

__all__ = [
    # Base classes
    "CognitiveConfig",
    "CognitiveModule",
    "MemoryModule",
    "TemporalModule",
    "WorldModelModule",
    "CognitivePreTrainedModel",
    "register_cognitive_model",
    # Checkpoint
    "remap_checkpoint_keys",
    "validate_checkpoint",
    "save_cognitive_checkpoint",
    "load_cognitive_checkpoint",
    # Utils
    "setup_environment",
    "get_device",
    "get_optimal_dtype",
    "get_memory_info",
    "clear_memory",
    "estimate_model_memory",
    "print_model_info",
    "print_training_progress",
    "get_hf_token",
    # Training
    "CognitiveTrainingConfig",
    "CognitiveTrainer",
    "prepare_dataset",
    "create_instruction_dataset",
    "quick_train",
    "CognitiveStateCallback",
]