# ====================================================================
# configuration_unified.py
# ====================================================================

"""
Configuration class for Unified Language Model
HuggingFace Transformers compatible configuration with AutoClass support
"""

from transformers import PretrainedConfig
from typing import Optional

class UnifiedModelConfig(PretrainedConfig):
    """
    Configuration class for UnifiedModel.
    Inherits from PretrainedConfig for full HuggingFace compatibility.
    """
    model_type = "unified_model"
    
    def __init__(
        self,
        vocab_size: Optional[int] = None,
        hidden_size: int = 256,
        intermediate_size: int = 1024,
        num_hidden_layers: int = 6,
        num_attention_heads: int = 8,
        num_key_value_heads: int = 4,
        max_position_embeddings: int = 2048,
        rms_norm_eps: float = 1e-6,
        rope_theta: float = 10000.0,
        
        attention_dropout: float = 0.1,
        mlp_dropout: float = 0.1,
        embedding_dropout: float = 0.1,
        
        xielu_alpha_p_init: float = 0.8,
        xielu_alpha_n_init: float = 0.8,
        xielu_beta: float = 0.5,
        
        tie_word_embeddings: bool = True,  # HuggingFace standard parameter name
        
        # LaX configuration (Linear only)
        lax_enabled: bool = True,
        lax_gate_type: str = "linear",  # Only "linear" supported now
        
        # Canon Layers configuration (A+C only)
        canon_enabled: bool = True,
        canon_kernel_size: int = 4,
        canon_a_enabled: bool = True,    # Before attention
        canon_c_enabled: bool = True,    # Before MLP
        # Canon B and D are permanently disabled
        
        # FANFormer configuration
        fanformer_p: float = 0.15,
        
        # HuggingFace standard parameters
        pad_token_id: Optional[int] = None,
        bos_token_id: Optional[int] = None,
        eos_token_id: Optional[int] = None,
        
        **kwargs
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs
        )
        
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.max_position_embeddings = max_position_embeddings
        self.rms_norm_eps = rms_norm_eps
        self.rope_theta = rope_theta
        
        self.attention_dropout = attention_dropout
        self.mlp_dropout = mlp_dropout
        self.embedding_dropout = embedding_dropout
        
        self.xielu_alpha_p_init = xielu_alpha_p_init
        self.xielu_alpha_n_init = xielu_alpha_n_init
        self.xielu_beta = xielu_beta
        self.tie_word_embeddings = tie_word_embeddings
        
        # LaX configuration
        self.lax_enabled = lax_enabled
        self.lax_gate_type = lax_gate_type
        
        # Canon Layers configuration
        self.canon_enabled = canon_enabled
        self.canon_kernel_size = canon_kernel_size
        self.canon_a_enabled = canon_a_enabled
        self.canon_c_enabled = canon_c_enabled
        
        # FANFormer
        self.fanformer_p = fanformer_p
        
        # ✅ FIXED: Force complete auto_map in config.json
        self.auto_map = {
            "AutoConfig": "configuration_unified.UnifiedModelConfig",
            "AutoModel": "modeling_unified.UnifiedModel", 
            "AutoModelForCausalLM": "modeling_unified.UnifiedModel"
        }

    def to_diff_dict(self):
        """
        ✅ FIXED: Force serialization of tie_word_embeddings into config.json

        Overrides to_diff_dict() to ensure that tie_word_embeddings always
        appears in config.json, avoiding loading issues where HuggingFace
        does not recognize the weight tying.

        Returns:
            Dict: diff configuration with tie_word_embeddings forced in
        """
        # Get the normal serialization (differences from defaults only)
        output = super().to_diff_dict()

        # ✅ FORCE the inclusion of tie_word_embeddings
        # This guarantees it appears in config.json regardless of whether
        # HF considers it a "default" value or not
        output["tie_word_embeddings"] = self.tie_word_embeddings

        return output
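

# ====================================================================
# Minimal usage sketch (not part of the library): illustrates how this
# config is expected to be instantiated, registered for AutoClass
# loading, and serialized. The vocab_size value and the
# "./unified_model_config" path are illustrative assumptions, not
# values defined elsewhere in this repo.
# ====================================================================
if __name__ == "__main__":
    from transformers import AutoConfig

    # Register the config so AutoConfig can resolve
    # model_type == "unified_model" locally.
    AutoConfig.register("unified_model", UnifiedModelConfig)

    config = UnifiedModelConfig(vocab_size=32000)  # hypothetical vocab size
    config.save_pretrained("./unified_model_config")

    # Thanks to the to_diff_dict() override above, tie_word_embeddings
    # is always present in the saved config.json.
    reloaded = AutoConfig.from_pretrained("./unified_model_config")
    print(reloaded.model_type, reloaded.tie_word_embeddings)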