muooon commited on
Commit
740c67b
·
verified ·
1 Parent(s): 8ffba08

Upload 23 files

Browse files
.gitattributes CHANGED
@@ -95,3 +95,19 @@ report/zundamon/ZM047.webp filter=lfs diff=lfs merge=lfs -text
95
  report/zundamon/ZM048.webp filter=lfs diff=lfs merge=lfs -text
96
  report/zundamon/ZM049.webp filter=lfs diff=lfs merge=lfs -text
97
  report/zundamon/ZM050.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
  report/zundamon/ZM048.webp filter=lfs diff=lfs merge=lfs -text
96
  report/zundamon/ZM049.webp filter=lfs diff=lfs merge=lfs -text
97
  report/zundamon/ZM050.webp filter=lfs diff=lfs merge=lfs -text
98
+ AMP-compatible/docs/rastrigin_EmoClan.png filter=lfs diff=lfs merge=lfs -text
99
+ AMP-compatible/docs/rastrigin_EmoFact.png filter=lfs diff=lfs merge=lfs -text
100
+ AMP-compatible/docs/rastrigin_EmoLynx.png filter=lfs diff=lfs merge=lfs -text
101
+ AMP-compatible/docs/rastrigin_EmoNavi.png filter=lfs diff=lfs merge=lfs -text
102
+ AMP-compatible/docs/rastrigin_EmoNeco.png filter=lfs diff=lfs merge=lfs -text
103
+ AMP-compatible/docs/rastrigin_EmoZeal.png filter=lfs diff=lfs merge=lfs -text
104
+ AMP-compatible/docs/rosenbrock_EmoClan.png filter=lfs diff=lfs merge=lfs -text
105
+ AMP-compatible/docs/rosenbrock_EmoFact.png filter=lfs diff=lfs merge=lfs -text
106
+ AMP-compatible/docs/rosenbrock_EmoLynx.png filter=lfs diff=lfs merge=lfs -text
107
+ AMP-compatible/docs/rosenbrock_EmoNavi.png filter=lfs diff=lfs merge=lfs -text
108
+ AMP-compatible/docs/rosenbrock_EmoNeco.png filter=lfs diff=lfs merge=lfs -text
109
+ AMP-compatible/docs/rosenbrock_EmoZeal.png filter=lfs diff=lfs merge=lfs -text
110
+ AMP-compatible/logs/fluctuation_and_accuracy_panel.png filter=lfs diff=lfs merge=lfs -text
111
+ AMP-compatible/logs/loss_comparison_panel.png filter=lfs diff=lfs merge=lfs -text
112
+ AMP-compatible/logs/trec_gpt2_weight_pca_3panel.png filter=lfs diff=lfs merge=lfs -text
113
+ AMP-compatible/logs/trec_weights_log.json filter=lfs diff=lfs merge=lfs -text
AMP-compatible/docs/rastrigin_EmoClan.png ADDED

Git LFS Details

  • SHA256: 4f8eef21b3f5b0cda9b4f77576178e404cbc3cf05c235ba4c8a0d6adb4a4bd1c
  • Pointer size: 131 Bytes
  • Size of remote file: 745 kB
AMP-compatible/docs/rastrigin_EmoFact.png ADDED

Git LFS Details

  • SHA256: f12bc7c3e0ad099eaf18db5eed0aaa49a22e926490c24e7c1461d020d9b89ed2
  • Pointer size: 131 Bytes
  • Size of remote file: 745 kB
AMP-compatible/docs/rastrigin_EmoLynx.png ADDED

Git LFS Details

  • SHA256: 986a1434173c2709c761e345fe02377f5f4f63db66076ad52ade521d6fc816ad
  • Pointer size: 131 Bytes
  • Size of remote file: 743 kB
AMP-compatible/docs/rastrigin_EmoNavi.png ADDED

Git LFS Details

  • SHA256: e128577ec5cfac12516f64aa64d36df57ecb969955c6b44714f91f65623fb2da
  • Pointer size: 131 Bytes
  • Size of remote file: 748 kB
AMP-compatible/docs/rastrigin_EmoNeco.png ADDED

Git LFS Details

  • SHA256: 399f7a0df2ba9c3f9b3d029aaf6bda53eb42f9a3b6d4528f18e69ee626b39858
  • Pointer size: 131 Bytes
  • Size of remote file: 743 kB
AMP-compatible/docs/rastrigin_EmoZeal.png ADDED

Git LFS Details

  • SHA256: 2bf9ec89e9fbb75d811ba31058ece87df6a858ff5cf6c426bddde8015aa18e21
  • Pointer size: 131 Bytes
  • Size of remote file: 745 kB
AMP-compatible/docs/rosenbrock_EmoClan.png ADDED

Git LFS Details

  • SHA256: 724066061af6a9b2e1f295ccf59d35bf5f34b5d92e2e8828a6e91396c74639f1
  • Pointer size: 131 Bytes
  • Size of remote file: 453 kB
AMP-compatible/docs/rosenbrock_EmoFact.png ADDED

Git LFS Details

  • SHA256: 394ffd0e91c799388a073d4f988a3954a430fa673b3bd2bf0066dea8c4a619aa
  • Pointer size: 131 Bytes
  • Size of remote file: 452 kB
AMP-compatible/docs/rosenbrock_EmoLynx.png ADDED

Git LFS Details

  • SHA256: 8c5fe9ed93bbb2734c705f6edc524ddc76ada069fe8c4326fb1d64e309e33109
  • Pointer size: 131 Bytes
  • Size of remote file: 405 kB
AMP-compatible/docs/rosenbrock_EmoNavi.png ADDED

Git LFS Details

  • SHA256: 7b8492f7754169900ed9acd9acf88a6cc4ac514e4483f7f72028568869891927
  • Pointer size: 131 Bytes
  • Size of remote file: 463 kB
AMP-compatible/docs/rosenbrock_EmoNeco.png ADDED

Git LFS Details

  • SHA256: 23dfe643fd868eefc446929b3fc9f54548d9dcfe69bdee658c906fd88e999b30
  • Pointer size: 131 Bytes
  • Size of remote file: 394 kB
AMP-compatible/docs/rosenbrock_EmoZeal.png ADDED

Git LFS Details

  • SHA256: 76baf8f9cdcf028a759ed087b376a76c2de3c51f4d52e65b2ba3e4a49f043baf
  • Pointer size: 131 Bytes
  • Size of remote file: 452 kB
AMP-compatible/emoclan.py ADDED
@@ -0,0 +1,259 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.optim import Optimizer
3
+ import math
4
+ from typing import Callable, Union, Dict, Any, Tuple
5
+
6
+ """
7
+ AMP対応完了(202507) p.data -> p 修正済み
8
+ """
9
+
10
+ # Helper function
11
+ def exists(val):
12
+ return val is not None
13
+
14
+ class EmoClan(Optimizer):
15
+ def __init__(self, params: Union[list, torch.nn.Module],
16
+ lr: float = 1e-3,
17
+ betas: Tuple[float, float] = (0.9, 0.999),
18
+ eps: float = 1e-8,
19
+ weight_decay: float = 0.01,
20
+ lynx_betas: Tuple[float, float] = (0.9, 0.99), # Lynx 固有の beta
21
+ decoupled_weight_decay: bool = False
22
+ ):
23
+
24
+ if not 0.0 <= lr:
25
+ raise ValueError(f"Invalid learning rate: {lr}")
26
+ if not 0.0 <= eps:
27
+ raise ValueError(f"Invalid epsilon value: {eps}")
28
+ if not 0.0 <= betas[0] < 1.0:
29
+ raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
30
+ if not 0.0 <= betas[1] < 1.0:
31
+ raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
32
+
33
+ # Lynx の betas もバリデーション
34
+ if not 0.0 <= lynx_betas[0] < 1.0:
35
+ raise ValueError(f"Invalid lynx_beta parameter at index 0: {lynx_betas[0]}")
36
+ if not 0.0 <= lynx_betas[1] < 1.0:
37
+ raise ValueError(f"Invalid lynx_beta parameter at index 1: {lynx_betas[1]}")
38
+
39
+ defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
40
+ lynx_betas=lynx_betas, decoupled_weight_decay=decoupled_weight_decay)
41
+ super().__init__(params, defaults)
42
+
43
+ self._init_lr = lr # decoupled weight decay のために保存 (Lynx用)
44
+ self.should_stop = False # 全体の停止フラグ
45
+
46
+ # --- 感情機構 (Emotion Mechanism) ---
47
+ def _update_ema(self, param_state: Dict[str, Any], loss_val: float) -> Dict[str, float]:
48
+ """損失値に基づいて短期・長期 EMA を更新"""
49
+ # param_state は各パラメータの state['ema'] を保持する
50
+ ema = param_state.setdefault('ema', {'short': loss_val, 'long': loss_val})
51
+ ema['short'] = 0.3 * loss_val + 0.7 * ema['short']
52
+ ema['long'] = 0.01 * loss_val + 0.99 * ema['long']
53
+ return ema
54
+
55
+ def _compute_scalar(self, ema: Dict[str, float]) -> float:
56
+ """EMA の差分から感情スカラー値を生成"""
57
+ diff = ema['short'] - ema['long']
58
+ return math.tanh(5 * diff)
59
+
60
+ def _decide_ratio(self, scalar: float) -> float:
61
+ """感情スカラーに基づいて Shadow の混合比率を決定"""
62
+ if scalar > 0.6:
63
+ return 0.7 + 0.2 * scalar # 0.7~0.9
64
+ elif scalar < -0.6:
65
+ return 0.1
66
+ elif abs(scalar) > 0.3: # >0.3 かつ <=0.6 の場合
67
+ return 0.3
68
+ return 0.0
69
+
70
+ # --- 各最適化器のコアな勾配更新ロジック (プライベートメソッドとして統合) ---
71
+
72
+ def _lynx_update(
73
+ self,
74
+ p: torch.Tensor,
75
+ grad: torch.Tensor,
76
+ param_state: Dict[str, Any],
77
+ lr: float,
78
+ beta1: float,
79
+ beta2: float,
80
+ wd_actual: float
81
+ ):
82
+ """EmoLynx のコアな勾配更新ロジック"""
83
+ # Stepweight decay: p = p * (1 - lr * wd)
84
+ p.mul_(1. - lr * wd_actual)
85
+
86
+ # Lynx 固有の EMA 状態は param_state に保持
87
+ if 'exp_avg_lynx' not in param_state:
88
+ param_state['exp_avg_lynx'] = torch.zeros_like(p)
89
+ exp_avg = param_state['exp_avg_lynx']
90
+
91
+ # 勾配ブレンド
92
+ blended_grad = grad.mul(1. - beta1).add_(exp_avg, alpha=beta1)
93
+
94
+ # 符号ベースの更新
95
+ p.add_(blended_grad.sign_(), alpha = -lr)
96
+
97
+ # exp_avg 更新
98
+ exp_avg.mul_(beta2).add_(grad, alpha = 1. - beta2)
99
+
100
+ def _navi_update(
101
+ self,
102
+ p: torch.Tensor,
103
+ grad: torch.Tensor,
104
+ param_state: Dict[str, Any],
105
+ lr: float,
106
+ betas: Tuple[float, float],
107
+ eps: float,
108
+ weight_decay: float
109
+ ):
110
+ """EmoNavi のコアな勾配更新ロジック"""
111
+ beta1, beta2 = betas
112
+
113
+ exp_avg = param_state.setdefault('exp_avg_navi', torch.zeros_like(p))
114
+ exp_avg_sq = param_state.setdefault('exp_avg_sq_navi', torch.zeros_like(p.to(torch.float32)))
115
+
116
+ exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
117
+ exp_avg_sq.mul_(beta2).addcmul_(grad.to(torch.float32), grad.to(torch.float32), value=1 - beta2)
118
+ denom = exp_avg_sq.sqrt().add_(eps)
119
+
120
+ # Weight decay (標準的手法)
121
+ if weight_decay:
122
+ p.mul_(1 - lr * weight_decay)
123
+
124
+ p.addcdiv_(exp_avg, denom, value=-lr)
125
+
126
+ def _fact_update(
127
+ self,
128
+ p: torch.Tensor,
129
+ grad: torch.Tensor,
130
+ param_state: Dict[str, Any],
131
+ lr: float,
132
+ betas: Tuple[float, float], # beta2 は現状使われないが互換性のため残す (1D勾配で使用)
133
+ eps: float,
134
+ weight_decay: float
135
+ ):
136
+ """EmoFact のコアな勾配更新ロジック (Adafactor ライク)"""
137
+ beta1, beta2 = betas
138
+
139
+ if grad.dim() >= 2:
140
+ # 行と列の2乗平均を計算 (分散の軽量な近似)
141
+ # gradをfloat32にキャストして計算することで数値安定性を高める
142
+ r_sq = torch.mean(grad.to(torch.float32) * grad.to(torch.float32), dim=tuple(range(1, grad.dim())), keepdim=True).add_(eps)
143
+ c_sq = torch.mean(grad.to(torch.float32) * grad.to(torch.float32), dim=0, keepdim=True).add_(eps)
144
+
145
+ param_state.setdefault('exp_avg_r_fact', torch.zeros_like(r_sq)).mul_(beta1).add_(torch.sqrt(r_sq), alpha=1 - beta1)
146
+ param_state.setdefault('exp_avg_c_fact', torch.zeros_like(c_sq)).mul_(beta1).add_(torch.sqrt(c_sq), alpha=1 - beta1)
147
+
148
+ # 再構築した近似勾配の平方根の積で正規化
149
+ denom = torch.sqrt(param_state['exp_avg_r_fact'] * param_state['exp_avg_c_fact']).add_(eps)
150
+ update_term = grad / denom # grad は元の型(float16またはfloat32)
151
+
152
+ else: # 1次元(ベクトル)の勾配補正
153
+ exp_avg = param_state.setdefault('exp_avg_fact', torch.zeros_like(p))
154
+ exp_avg_sq = param_state.setdefault('exp_avg_sq_fact', torch.zeros_like(p.to(torch.float32)))
155
+
156
+ exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
157
+ exp_avg_sq.mul_(beta2).addcmul_(grad.to(torch.float32), grad.to(torch.float32), value=1 - beta2)
158
+ denom = exp_avg_sq.sqrt().add_(eps)
159
+ update_term = exp_avg / denom
160
+
161
+ # 最終的なパラメータ更新 (decoupled weight decayも適用)
162
+ # decoupled_weight_decay は __init__ でグループにdefaultsとして渡されているが、
163
+ # ここではfactorロジック自体がweight_decayを受け取る形式
164
+ p.mul_(1 - weight_decay * lr)
165
+ p.add_(update_term, alpha=-lr)
166
+
167
+
168
+ @torch.no_grad()
169
+ def step(self, closure: Callable | None = None):
170
+ loss = None
171
+ if exists(closure):
172
+ with torch.enable_grad():
173
+ loss = closure()
174
+ loss_val = loss.item() if loss is not None else 0.0
175
+
176
+ # 全体の scalar_hist を EmoClan インスタンスで管理
177
+ global_scalar_hist = self.state.setdefault('global_scalar_hist', [])
178
+
179
+ # 全体としての感情EMA状態を self.state に保持し、現在の感情スカラーを計算
180
+ global_ema_state = self.state.setdefault('global_ema', {'short': loss_val, 'long': loss_val})
181
+ global_ema_state['short'] = 0.3 * loss_val + 0.7 * global_ema_state['short']
182
+ global_ema_state['long'] = 0.01 * loss_val + 0.99 * global_ema_state['long']
183
+ current_global_scalar = self._compute_scalar(global_ema_state)
184
+
185
+ # global_scalar_hist に現在の感情スカラーを追加
186
+ global_scalar_hist.append(current_global_scalar)
187
+ if len(global_scalar_hist) >= 33:
188
+ global_scalar_hist.pop(0)
189
+
190
+
191
+ for group in self.param_groups:
192
+ lr = group['lr']
193
+ wd = group['weight_decay']
194
+ eps = group['eps']
195
+ decoupled_wd = group['decoupled_weight_decay']
196
+
197
+ lynx_beta1, lynx_beta2 = group['lynx_betas']
198
+ navi_fact_betas = group['betas'] # Navi/Fact 共通の beta を使用 (デフォルトの betas)
199
+
200
+ # Lynx の decoupled_wd のための _wd_actual 計算
201
+ _wd_actual_lynx = wd
202
+ if decoupled_wd:
203
+ _wd_actual_lynx /= self._init_lr
204
+
205
+ for p in group['params']:
206
+ if p.grad is None:
207
+ continue
208
+
209
+ grad = p.grad
210
+ param_state = self.state[p] # 各パラメータごとの状態
211
+
212
+ # --- 各パラメータごとの感情機構の更新と Shadow 処理 ---
213
+ # 各パラメータの state['ema'] は、それぞれの loss_val (全体で共通) を元に更新される
214
+ # ただし、現状の loss_val はクロージャから受け取った単一の値なので、
215
+ # 各パラメータ固有の「感情」を定義するより、全体としての感情が使われることになる。
216
+ param_ema = self._update_ema(param_state, loss_val)
217
+ param_scalar = self._compute_scalar(param_ema) # 各パラメータ固有のスカラー
218
+
219
+ ratio = self._decide_ratio(param_scalar) # 各パラメータ固有の ratio
220
+
221
+ if ratio > 0:
222
+ if 'shadow' not in param_state:
223
+ param_state['shadow'] = p.clone()
224
+ else:
225
+ # Shadow を現在値にブレンド
226
+ p.mul_(1 - ratio).add_(param_state['shadow'], alpha=ratio)
227
+ # Shadow を現在値に追従させる
228
+ param_state['shadow'].lerp_(p, 0.05)
229
+
230
+ # --- 最適化器の選択と勾配更新 ---
231
+ # 現在のglobal_scalar_histに記録された全体としての感情スカラーに基づいてフェーズを判断
232
+ # global_scalar が [-0.3, 0.3] の範囲にある場合は Navi
233
+ # global_scalar > 0.3 の場合は Lynx
234
+ # global_scalar < -0.3 の場合は Fact
235
+ if current_global_scalar > 0.3: # 序盤・過学習傾向時
236
+ self._lynx_update(p, grad, param_state, lr, lynx_beta1, lynx_beta2, _wd_actual_lynx)
237
+ elif current_global_scalar < -0.3: # 終盤・発散傾向時
238
+ self._fact_update(p, grad, param_state, lr, navi_fact_betas, eps, wd)
239
+ else: # -0.3 <= current_global_scalar <= 0.3 の中盤
240
+ self._navi_update(p, grad, param_state, lr, navi_fact_betas, eps, wd)
241
+
242
+ # Early Stop判断
243
+ # global_scalar_hist の評価
244
+ if len(global_scalar_hist) >= 32:
245
+ buf = global_scalar_hist
246
+ avg_abs = sum(abs(s) for s in buf) / len(buf)
247
+ std = sum((s - sum(buf)/len(buf))**2 for s in buf) / len(buf)
248
+ if avg_abs < 0.05 and std < 0.005:
249
+ self.should_stop = True # 外部からこれを見て判断可
250
+
251
+ return loss
252
+
253
+ """
254
+ Emoシリーズは、Adam、Adafactor、Lion、Tiger、等から多くを学びました。
255
+ この開発において先人たちの知見に深く感謝しつつ今後も新しい可能性を探究します。
256
+ The Emo series has learned much from Adam, Adafactor, Lion, and Tiger.
257
+ Rather than being their successors,
258
+ In its development, we deeply appreciate the insights of those who came before us—and continue to explore new possibilities beyond them.
259
+ """
AMP-compatible/emofact.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.optim import Optimizer
3
+ import math
4
+
5
+ """
6
+ AMP対応完了(202507) p.data -> p 修正済み
7
+ """
8
+
9
+ class EmoFact(Optimizer):
10
+ # クラス定義&初期化
11
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999),
12
+ eps=1e-8, weight_decay=0.01):
13
+ defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
14
+ super().__init__(params, defaults)
15
+ self._init_lr = lr
16
+ self.should_stop = False # 停止フラグの初期化
17
+
18
+ # 感情EMA更新(緊張と安静)
19
+ def _update_ema(self, state, loss_val):
20
+ ema = state.setdefault('ema', {})
21
+ ema['short'] = 0.3 * loss_val + 0.7 * ema.get('short', loss_val)
22
+ ema['long'] = 0.01 * loss_val + 0.99 * ema.get('long', loss_val)
23
+ return ema
24
+
25
+ # 感情スカラー値生成(EMA差分、滑らかな非線形スカラー、tanh 5 * diff で鋭敏さ強調)
26
+ def _compute_scalar(self, ema):
27
+ diff = ema['short'] - ema['long']
28
+ return math.tanh(5 * diff)
29
+
30
+ # Shadow混合比率(> 0.6:70〜90%、 < -0.6:10%、 abs> 0.3:30%、 平時:0%)
31
+ def _decide_ratio(self, scalar):
32
+ if scalar > 0.6:
33
+ return 0.7 + 0.2 * scalar
34
+ elif scalar < -0.6:
35
+ return 0.1
36
+ elif abs(scalar) > 0.3:
37
+ return 0.3
38
+ return 0.0
39
+
40
+ # 損失取得(損失値 loss_val を数値化、感情判定に使用、存在しないパラメータ(更新不要)はスキップ)
41
+ @torch.no_grad()
42
+ def step(self, closure=None):
43
+ loss = closure() if closure is not None else None
44
+ loss_val = loss.item() if loss is not None else 0.0
45
+
46
+ for group in self.param_groups:
47
+ for p in group['params']:
48
+ if p.grad is None:
49
+ continue
50
+
51
+ grad = p.grad
52
+ state = self.state[p]
53
+
54
+ # 感情EMA更新・スカラー生成 (既存ロジックを維持)
55
+ ema = self._update_ema(state, loss_val)
56
+ scalar = self._compute_scalar(ema)
57
+ ratio = self._decide_ratio(scalar)
58
+
59
+ # shadow_param:必要時のみ更新 (既存ロジックを維持)
60
+ if ratio > 0:
61
+ if 'shadow' not in state:
62
+ state['shadow'] = p.clone()
63
+ else:
64
+ p.mul_(1 - ratio).add_(state['shadow'], alpha=ratio)
65
+ state['shadow'].lerp_(p, 0.05)
66
+
67
+ # --- 勾配補正ロジック ---
68
+ # 行列の形状が2次元以上の場合、分散情報ベースのAB近似を使用
69
+ if grad.dim() >= 2:
70
+ # 行と列の2乗平均を計算 (分散の軽量な近似)
71
+ r_sq = torch.mean(grad * grad, dim=tuple(range(1, grad.dim())), keepdim=True).add_(group['eps'])
72
+ c_sq = torch.mean(grad * grad, dim=0, keepdim=True).add_(group['eps'])
73
+
74
+ # 分散情報から勾配の近似行列を生成
75
+ # AB行列として見立てたものを直接生成し更新項を計算する
76
+ # A = sqrt(r_sq), B = sqrt(c_sq) とすることでAB行列の近似を再現
77
+ # これをEMAで平滑化する
78
+ beta1, beta2 = group['betas']
79
+
80
+ state.setdefault('exp_avg_r', torch.zeros_like(r_sq)).mul_(beta1).add_(torch.sqrt(r_sq), alpha=1 - beta1)
81
+ state.setdefault('exp_avg_c', torch.zeros_like(c_sq)).mul_(beta1).add_(torch.sqrt(c_sq), alpha=1 - beta1)
82
+
83
+ # 再構築した近似勾配の平方根の積で正規化
84
+ # これにより2次モーメントのような役割を果たす
85
+ denom = torch.sqrt(state['exp_avg_r'] * state['exp_avg_c']).add_(group['eps'])
86
+
87
+ # 最終的な更新項を計算
88
+ update_term = grad / denom
89
+
90
+ # 1次元(ベクトル)の勾配補正(decoupled weight decay 構造に近い)
91
+ else:
92
+ exp_avg = state.setdefault('exp_avg', torch.zeros_like(p))
93
+ exp_avg_sq = state.setdefault('exp_avg_sq', torch.zeros_like(p))
94
+ beta1, beta2 = group['betas']
95
+ exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
96
+ exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
97
+ denom = exp_avg_sq.sqrt().add_(group['eps'])
98
+ update_term = exp_avg / denom
99
+
100
+ # 最終的なパラメータ更新 (decoupled weight decayも適用)
101
+ p.add_(p, alpha=-group['weight_decay'] * group['lr'])
102
+ p.add_(update_term, alpha=-group['lr'])
103
+
104
+ # --- Early Stop ロジック (既存ロ���ックを維持) ---
105
+ hist = self.state.setdefault('scalar_hist', [])
106
+ hist.append(scalar)
107
+ if len(hist) >= 33:
108
+ hist.pop(0)
109
+
110
+ # Early Stop判断
111
+ if len(self.state['scalar_hist']) >= 32:
112
+ buf = self.state['scalar_hist']
113
+ avg_abs = sum(abs(s) for s in buf) / len(buf)
114
+ std = sum((s - sum(buf)/len(buf))**2 for s in buf) / len(buf)
115
+ if avg_abs < 0.05 and std < 0.005:
116
+ self.should_stop = True
117
+
118
+ return loss
119
+
120
+ """
121
+ https://github.com/muooon/EmoNavi
122
+ Fact is inspired by Adafactor,
123
+ and its VRAM-friendly design is something everyone loves.
124
+ """
AMP-compatible/emolynx.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.optim import Optimizer
3
+ import math
4
+ from typing import Tuple, Callable, Union
5
+
6
+ """
7
+ AMP対応完了(202507) p.data -> p 修正済み
8
+ """
9
+
10
+ # Helper function (Lynx)
11
+ def exists(val):
12
+ return val is not None
13
+
14
+ class EmoLynx(Optimizer):
15
+ # クラス定義&初期化
16
+ def __init__(self, params: Union[list, torch.nn.Module], lr=1e-3, betas=(0.9, 0.99),
17
+ # lynx用ベータ・互換性の追加(lynx用beta1・beta2)
18
+ eps=1e-8, weight_decay=0.01, decoupled_weight_decay: bool = False):
19
+
20
+ defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
21
+ super().__init__(params, defaults)
22
+
23
+ # lynxに応じてウェイト減衰のため保存
24
+ self._init_lr = lr
25
+ self.should_stop = False # 停止フラグの初期化
26
+ self.decoupled_wd = decoupled_weight_decay
27
+
28
+ # 感情EMA更新(緊張と安静)
29
+ def _update_ema(self, state, loss_val):
30
+ ema = state.setdefault('ema', {})
31
+ ema['short'] = 0.3 * loss_val + 0.7 * ema.get('short', loss_val)
32
+ ema['long'] = 0.01 * loss_val + 0.99 * ema.get('long', loss_val)
33
+ return ema
34
+
35
+ # 感情スカラー値生成(EMA差分、滑らかな非線形スカラー、tanh 5 * diff で鋭敏さ強調)
36
+ def _compute_scalar(self, ema):
37
+ diff = ema['short'] - ema['long']
38
+ return math.tanh(5 * diff)
39
+
40
+ # Shadow混合比率(> 0.6:70〜90%、 < -0.6:10%、 abs> 0.3:30%、 平時:0%)
41
+ def _decide_ratio(self, scalar):
42
+ if scalar > 0.6:
43
+ return 0.7 + 0.2 * scalar
44
+ elif scalar < -0.6:
45
+ return 0.1
46
+ elif abs(scalar) > 0.3:
47
+ return 0.3
48
+ return 0.0
49
+
50
+ # 損失取得(損失値 loss_val を数値化、感情判定に使用、存在しないパラメータ(更新不要)はスキップ)
51
+ @torch.no_grad()
52
+ def step(self, closure: Callable | None = None): # クロージャの型ヒントを追加
53
+ loss = None
54
+ if exists(closure): # 一貫性のためにexistsヘルパーを使う
55
+ with torch.enable_grad():
56
+ loss = closure()
57
+ loss_val = loss.item() if loss is not None else 0.0
58
+
59
+ for group in self.param_groups:
60
+ # リンクス共通パラメータ抽出
61
+ lr, wd, beta1, beta2 = group['lr'], group['weight_decay'], *group['betas']
62
+
63
+ # ウェイト減衰の処理を分離 (from lynx)
64
+ _wd_actual = wd
65
+ if self.decoupled_wd:
66
+ _wd_actual /= self._init_lr # 非連結時ウェイト減衰調整
67
+
68
+ for p in filter(lambda p: exists(p.grad), group['params']): # PGチェックにフィルタ
69
+
70
+ grad = p.grad # PG直接使用(計算に".data"不要)
71
+ state = self.state[p]
72
+
73
+ # EMA更新・スカラー生成(EMA差分からスカラーを生成しスパイク比率を決定)
74
+ ema = self._update_ema(state, loss_val)
75
+ scalar = self._compute_scalar(ema)
76
+ ratio = self._decide_ratio(scalar)
77
+
78
+ # shadow_param:必要時のみ更新(スパイク部分に現在値を5%ずつ追従させる動的履歴)
79
+ if ratio > 0:
80
+ if 'shadow' not in state:
81
+ state['shadow'] = p.clone()
82
+ else:
83
+ p.mul_(1 - ratio).add_(state['shadow'], alpha=ratio)
84
+ state['shadow'].lerp_(p, 0.05)
85
+ # lynx更新前 p で shadow 更新(現在値を5%ずつ追従)
86
+ # p.mul_(1 - ratio).add_(state['shadow'], alpha=ratio)
87
+ # EmoNavi: p = p * (1-ratio) + shadow * ratio
88
+
89
+ # --- Start Lynx Gradient Update Logic ---
90
+
91
+ # lynx初期化(exp_avg_sq)
92
+ if 'exp_avg' not in state:
93
+ state['exp_avg'] = torch.zeros_like(p)
94
+ exp_avg = state['exp_avg']
95
+
96
+ # Stepweight decay (from lynx): p = p * (1 - lr * wd)
97
+ # decoupled_wd 考慮 _wd_actual 使用(EmoNaviのwdは最後に適用)
98
+ p.mul_(1. - lr * _wd_actual)
99
+
100
+ # 勾配ブレンド
101
+ # m_t = beta1 * exp_avg_prev + (1 - beta1) * grad
102
+ blended_grad = grad.mul(1. - beta1).add_(exp_avg, alpha=beta1)
103
+
104
+ # p: p = p - lr * sign(blended_grad)
105
+ p.add_(blended_grad.sign_(), alpha = -lr)
106
+
107
+ # exp_avg = beta2 * exp_avg + (1 - beta2) * grad
108
+ exp_avg.mul_(beta2).add_(grad, alpha = 1. - beta2)
109
+
110
+ # --- End Lynx Gradient Update Logic ---
111
+
112
+ # Early Stop用 scalar記録(バッファ共通で管理/最大32件保持/動静評価)
113
+ # この部分は p.state ではなく self.state にアクセスする
114
+ hist = self.state.setdefault('scalar_hist', [])
115
+ hist.append(scalar)
116
+ if len(hist) >= 33:
117
+ hist.pop(0)
118
+
119
+ # Early Stop判断(静けさの合図) - This part is outside the inner loop
120
+ if len(self.state['scalar_hist']) >= 32:
121
+ buf = self.state['scalar_hist']
122
+ avg_abs = sum(abs(s) for s in buf) / len(buf)
123
+ std = sum((s - sum(buf)/len(buf))**2 for s in buf) / len(buf)
124
+ if avg_abs < 0.05 and std < 0.005:
125
+ self.should_stop = True # 外部からこれを見て判断可
126
+
127
+ return loss
128
+
129
+ """
130
+ https://github.com/muooon/EmoNavi
131
+ Lynx was developed with inspiration from Lion and Tiger,
132
+ which we deeply respect for their lightweight and intelligent design.
133
+ Lynx also integrates EmoNAVI to enhance its capabilities.
134
+ """
AMP-compatible/emonavi.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.optim import Optimizer
3
+ import math
4
+
5
+ """
6
+ AMP対応完了(202507) p.data -> p 修正済み
7
+ """
8
+
9
+ class EmoNavi(Optimizer):
10
+ # クラス定義&初期化
11
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999),
12
+ eps=1e-8, weight_decay=0.01):
13
+ defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
14
+ super().__init__(params, defaults)
15
+ self._init_lr = lr
16
+ self.should_stop = False # 停止フラグの初期化
17
+
18
+ # 感情EMA更新(緊張と安静)
19
+ def _update_ema(self, state, loss_val):
20
+ ema = state.setdefault('ema', {})
21
+ ema['short'] = 0.3 * loss_val + 0.7 * ema.get('short', loss_val)
22
+ ema['long'] = 0.01 * loss_val + 0.99 * ema.get('long', loss_val)
23
+ return ema
24
+
25
+ # 感情スカラー値生成(EMA差分、滑らかな非線形スカラー、tanh 5 * diff で鋭敏さ強調)
26
+ def _compute_scalar(self, ema):
27
+ diff = ema['short'] - ema['long']
28
+ return math.tanh(5 * diff)
29
+
30
+ # Shadow混合比率(> 0.6:70〜90%、 < -0.6:10%、 abs> 0.3:30%、 平時:0%)
31
+ def _decide_ratio(self, scalar):
32
+ if scalar > 0.6:
33
+ return 0.7 + 0.2 * scalar
34
+ elif scalar < -0.6:
35
+ return 0.1
36
+ elif abs(scalar) > 0.3:
37
+ return 0.3
38
+ return 0.0
39
+
40
+ # 損失取得(損失値 loss_val を数値化、感情判定に使用、存在しないパラメータ(更新不要)はスキップ)
41
+ @torch.no_grad()
42
+ def step(self, closure=None):
43
+ loss = closure() if closure is not None else None
44
+ loss_val = loss.item() if loss is not None else 0.0
45
+
46
+ for group in self.param_groups:
47
+ for p in group['params']:
48
+ if p.grad is None:
49
+ continue
50
+
51
+ grad = p.grad
52
+ state = self.state[p]
53
+
54
+ # EMA更新・スカラー生成(EMA差分からスカラーを生成しスパイク比率を決定)
55
+ ema = self._update_ema(state, loss_val)
56
+ scalar = self._compute_scalar(ema)
57
+ ratio = self._decide_ratio(scalar)
58
+
59
+ # shadow_param:必要時のみ更新(スパイク部分に現在値を5%ずつ追従させる動的履歴)
60
+ if ratio > 0:
61
+ if 'shadow' not in state:
62
+ state['shadow'] = p.clone()
63
+ else:
64
+ p.mul_(1 - ratio).add_(state['shadow'], alpha=ratio)
65
+ state['shadow'].lerp_(p, 0.05)
66
+
67
+ # スカラー生成:短期と長期EMAの差分から信号を得る(高ぶりの強さ)
68
+ # 混合比率:スカラーが閾値を超える場合にのみ計算される(信頼できる感情信号かどうかの選別)
69
+ # → スカラー値が小さい場合は ratio = 0 となり、shadow混合は行われない
70
+ # → 信頼できる強い差分のときのみ感情機構が発動する(暗黙の信頼度判定)
71
+
72
+ # 1次・2次モーメントを使った勾配補正(decoupled weight decay 構造に近い)
73
+ exp_avg = state.setdefault('exp_avg', torch.zeros_like(p))
74
+ exp_avg_sq = state.setdefault('exp_avg_sq', torch.zeros_like(p))
75
+ beta1, beta2 = group['betas']
76
+ exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
77
+ exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
78
+ denom = exp_avg_sq.sqrt().add_(group['eps'])
79
+
80
+ step_size = group['lr']
81
+ if group['weight_decay']:
82
+ p.add_(p, alpha=-group['weight_decay'] * step_size)
83
+ p.addcdiv_(exp_avg, denom, value=-step_size)
84
+
85
+ # 感情機構の発火が収まり"十分に安定"していることを外部伝達できる(自動停止ロジックではない)
86
+ # Early Stop用 scalar 記録(バッファ共通で管理/最大32件保持/動静評価)
87
+ hist = self.state.setdefault('scalar_hist', [])
88
+ hist.append(scalar)
89
+ if len(hist) >= 33:
90
+ hist.pop(0)
91
+
92
+ # Early Stop判断(静けさの合図)
93
+ if len(self.state['scalar_hist']) >= 32:
94
+ buf = self.state['scalar_hist']
95
+ avg_abs = sum(abs(s) for s in buf) / len(buf)
96
+ std = sum((s - sum(buf)/len(buf))**2 for s in buf) / len(buf)
97
+ if avg_abs < 0.05 and std < 0.005:
98
+ self.should_stop = True # 💡 外部からこれを見て判断可
99
+
100
+ # 32ステップ分のスカラー値の静かな条件を満たした時"フラグ" should_stop = True になるだけ
101
+
102
+ return loss
103
+
104
+ """
105
+ https://github.com/muooon/EmoNavi
106
+ An emotion-driven optimizer that feels loss and navigates accordingly.
107
+ Don't think. Feel. Don't stop. Keep running. Believe in what's beyond.
108
+ """
AMP-compatible/emoneco.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.optim import Optimizer
3
+ import math
4
+ from typing import Tuple, Callable, Union
5
+
6
+ """
7
+ AMP対応完了(202507) p.data -> p 修正済み
8
+ """
9
+
10
+ # Helper function (Lynx)
11
+ def exists(val):
12
+ return val is not None
13
+ # Soft Sign 関数
14
+ def softsign(x):
15
+ return x / (1 + x.abs())
16
+
17
+ class EmoNeco(Optimizer):
18
+ # クラス定義&初期化
19
+ def __init__(self, params: Union[list, torch.nn.Module], lr=1e-3, betas=(0.9, 0.99),
20
+ # neco用ベータ・互換性の追加(neco用beta1・beta2)
21
+ eps=1e-8, weight_decay=0.01, decoupled_weight_decay: bool = False):
22
+
23
+ defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
24
+ super().__init__(params, defaults)
25
+
26
+ # ウェイト減衰のため保存
27
+ self._init_lr = lr
28
+ self.decoupled_wd = decoupled_weight_decay
29
+ self.should_stop = False # 停止フラグの初期化
30
+
31
+ # 感情EMA更新(緊張と安静)
32
+ def _update_ema(self, state, loss_val):
33
+ ema = state.setdefault('ema', {})
34
+ ema['short'] = 0.3 * loss_val + 0.7 * ema.get('short', loss_val)
35
+ ema['long'] = 0.01 * loss_val + 0.99 * ema.get('long', loss_val)
36
+ return ema
37
+
38
+ # 感情スカラー値生成(EMA差分、滑らかな非線形スカラー、tanh 5 * diff で鋭敏さ強調)
39
+ def _compute_scalar(self, ema):
40
+ diff = ema['short'] - ema['long']
41
+ return math.tanh(5 * diff)
42
+
43
+ # Shadow混合比率(> 0.6:70〜90%、 < -0.6:10%、 abs> 0.3:30%、 平時:0%)
44
+ def _decide_ratio(self, scalar):
45
+ if scalar > 0.6:
46
+ return 0.7 + 0.2 * scalar
47
+ elif scalar < -0.6:
48
+ return 0.1
49
+ elif abs(scalar) > 0.3:
50
+ return 0.3
51
+ return 0.0
52
+
53
+ # 損失取得(損失値 loss_val を数値化、感情判定に使用、存在しないパラメータ(更新不要)はスキップ)
54
+ @torch.no_grad()
55
+ def step(self, closure: Callable | None = None): # クロージャの型ヒントを追加
56
+ loss = None
57
+ if exists(closure): # 一貫性のためにexistsヘルパーを使う
58
+ with torch.enable_grad():
59
+ loss = closure()
60
+ loss_val = loss.item() if loss is not None else 0.0
61
+
62
+ for group in self.param_groups:
63
+ # 共通パラメータ抽出
64
+ lr, wd, beta1, beta2 = group['lr'], group['weight_decay'], *group['betas']
65
+
66
+ # ウェイト減衰の処理を分離 (from lynx)
67
+ _wd_actual = wd
68
+ if self.decoupled_wd:
69
+ _wd_actual /= self._init_lr # 非連結時ウェイト減衰調整
70
+
71
+ for p in filter(lambda p: exists(p.grad), group['params']): # PGチェックにフィルタ
72
+
73
+ grad = p.grad # PG直接使用(計算に".data"不要)
74
+ state = self.state[p]
75
+
76
+ # EMA更新・スカラー生成(EMA差分からスカラーを生成しスパイク比率を決定)
77
+ ema = self._update_ema(state, loss_val)
78
+ scalar = self._compute_scalar(ema)
79
+ ratio = self._decide_ratio(scalar)
80
+
81
+ # shadow_param:必要時のみ更新(スパイク部分に現在値を5%ずつ追従させる動的履歴)
82
+ if ratio > 0:
83
+ if 'shadow' not in state:
84
+ state['shadow'] = p.clone()
85
+ else:
86
+ p.mul_(1 - ratio).add_(state['shadow'], alpha=ratio)
87
+ state['shadow'].lerp_(p, 0.05)
88
+ # 更新前 p で shadow 更新(現在値を5%ずつ追従)
89
+ # p.mul_(1 - ratio).add_(state['shadow'], alpha=ratio)
90
+ # EmoNavi: p = p * (1-ratio) + shadow * ratio
91
+
92
+ # --- Start Neco Gradient Update Logic ---
93
+
94
+ # neco初期化(exp_avg_sq)
95
+ if 'exp_avg' not in state:
96
+ state['exp_avg'] = torch.zeros_like(p)
97
+ exp_avg = state['exp_avg']
98
+
99
+ # Stepweight decay (from lynx): p = p * (1 - lr * wd)
100
+ # decoupled_wd 考慮 _wd_actual 使用(EmoNaviのwdは最後に適用)
101
+ p.mul_(1. - lr * _wd_actual)
102
+
103
+ # 勾配ブレンド
104
+ # m_t = beta1 * exp_avg_prev + (1 - beta1) * grad
105
+ blended_grad = grad.mul(1. - beta1).add_(exp_avg, alpha=beta1)
106
+ grad_norm = torch.norm(grad, dtype=torch.float32) # 勾配ノルムの計算
107
+
108
+ # scalar < -0.3 の場合のみ SoftSign、それ以外 Cautious (終盤や発散傾向をSSに)
109
+ # p - lr * softsign(blended_grad) (from softsign)
110
+ # p - lr * direction * mask (from Cautious)
111
+ # safe_norm 極値のブレンド勾配に対するスケーリング
112
+ if 0.3 < scalar <= 0.5:
113
+ safe_norm = grad_norm + eps
114
+ modified_grad = softsign(blended_grad) * safe_norm
115
+ p.add_(-lr * modified_grad)
116
+ elif scalar < -0.3:
117
+ p.add_(softsign(blended_grad), alpha = -lr) # Soft Sign 処理
118
+ else:
119
+ direction = blended_grad.sign() # 勾配方向の符号 Cautious 処理
120
+ mask = (direction == grad.sign()) # 過去の勾配と方向が一致している部分のみ更新
121
+ p.add_(direction * mask, alpha = -lr) # Cautious 更新
122
+
123
+ # exp_avg = beta2 * exp_avg + (1 - beta2) * grad
124
+ exp_avg.mul_(beta2).add_(grad, alpha = 1. - beta2)
125
+
126
+ # --- End Neco Gradient Update Logic ---
127
+
128
+ # Early Stop用 scalar記録(バッファ共通で管理/最大32件保持/動静評価)
129
+ # この部分は p.state ではなく self.state にアクセスする
130
+ hist = self.state.setdefault('scalar_hist', [])
131
+ hist.append(scalar)
132
+ if len(hist) >= 33:
133
+ hist.pop(0)
134
+
135
+ # Early Stop判断(静けさの合図) This part is outside the inner loop
136
+ if len(self.state['scalar_hist']) >= 32:
137
+ buf = self.state['scalar_hist']
138
+ avg_abs = sum(abs(s) for s in buf) / len(buf)
139
+ std = sum((s - sum(buf)/len(buf))**2 for s in buf) / len(buf)
140
+ if avg_abs < 0.05 and std < 0.005:
141
+ self.should_stop = True # 外部からこれを見て判断可
142
+
143
+ return loss
144
+
145
+ """
146
+ https://github.com/muooon/EmoNavi
147
+ Neco was developed with inspiration from Lion, Tiger, Cautious, softsign, and Lynx
148
+ which we deeply respect for their lightweight and intelligent design.
149
+ Neco also integrates EmoNAVI to enhance its capabilities.
150
+ """
AMP-compatible/emozeal.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.optim import Optimizer
3
+ import math
4
+
5
+ """
6
+ AMP support completed (2025-07); migrated p.data -> p
7
+ """
8
+
9
def softsign(x):
    """Soft-sign nonlinearity: x / (1 + |x|), squashing values into (-1, 1)."""
    denominator = x.abs().add(1)
    return x.div(denominator)
12
+
13
class EmoZeal(Optimizer):
    """Emotion-driven optimizer with Adafactor-style factored statistics.

    A pair of short/long EMAs of the loss yields a bounded "emotion" scalar
    in (-1, 1) that (a) selects between a SoftSign update and a Cautious
    (sign-agreement-masked) update, and (b) decides how strongly to blend
    the parameters with a lazily created "shadow" copy.  Parameters with
    2+ dimensions additionally get a VRAM-friendly factored second-moment
    normalization (row/column mean-square statistics, as in Adafactor);
    1-D parameters fall back to Adam-like moments.

    The optimizer also maintains a shared history of the emotion scalar and
    raises ``self.should_stop`` when it has been quiet for a while, so an
    external training loop can early-stop.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999),
                 eps=1e-8, weight_decay=0.01):
        """Create the optimizer.

        Args:
            params: iterable of parameters or parameter groups (torch convention).
            lr: learning rate.
            betas: (beta1, beta2) EMA coefficients for first/second moments.
            eps: numerical-stability constant added to denominators.
            weight_decay: decoupled weight-decay coefficient.
        """
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super().__init__(params, defaults)
        # NOTE(review): on a freshly constructed instance getattr always falls
        # back to 1.0, and alpha_prev is not read anywhere in this file.
        self.alpha_prev = getattr(self, 'alpha_prev', 1.0)
        self._init_lr = lr          # initial learning rate, kept for reference
        self.should_stop = False    # early-stop flag; polled by external code

    def _update_ema(self, state, loss_val):
        """Update and return the short/long loss EMAs ("tension" vs "calm").

        ``short`` is a fast EMA (keep 0.7 of the old value), ``long`` a slow
        one (keep 0.99).  Both are seeded with the first observed loss.
        """
        ema = state.setdefault('ema', {})
        ema['short'] = 0.3 * loss_val + 0.7 * ema.get('short', loss_val)
        ema['long'] = 0.01 * loss_val + 0.99 * ema.get('long', loss_val)
        return ema

    def _compute_scalar(self, ema):
        """Map the short-long EMA gap to (-1, 1); tanh(5*diff) sharpens sensitivity."""
        diff = ema['short'] - ema['long']
        return math.tanh(5 * diff)

    def _decide_ratio(self, scalar):
        """Shadow mixing ratio: >0.6 -> 70-90%, <-0.6 -> 10%, |s|>0.3 -> 30%, else 0%."""
        if scalar > 0.6:
            return 0.7 + 0.2 * scalar
        elif scalar < -0.6:
            return 0.1
        elif abs(scalar) > 0.3:
            return 0.3
        return 0.0

    # Loss acquisition: loss_val is used for the emotion signal; parameters
    # without a gradient (nothing to update) are skipped.
    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.  Without it the emotion signal sees 0.0.

        Returns:
            The loss returned by ``closure``, or ``None``.
        """
        loss = closure() if closure is not None else None
        loss_val = loss.item() if loss is not None else 0.0

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                grad = p.grad
                state = self.state[p]

                # Emotion EMA update and scalar/ratio derivation.
                ema = self._update_ema(state, loss_val)
                scalar = self._compute_scalar(ema)
                ratio = self._decide_ratio(scalar)

                # Shadow parameter: created lazily, blended only when needed.
                if ratio > 0:
                    if 'shadow' not in state:
                        state['shadow'] = p.clone()
                    else:
                        # Pull p toward the shadow, then let the shadow
                        # slowly track p (5% per event).
                        p.mul_(1 - ratio).add_(state['shadow'], alpha=ratio)
                        state['shadow'].lerp_(p, 0.05)

                # --- gradient correction logic ---
                # For matrices (dim >= 2): variance-information-based AB
                # approximation (Adafactor-style factored statistics).
                if grad.dim() >= 2:
                    # Row/column mean squares: a lightweight variance proxy.
                    r_sq = torch.mean(grad * grad, dim=tuple(range(1, grad.dim())), keepdim=True).add_(group['eps'])
                    c_sq = torch.mean(grad * grad, dim=0, keepdim=True).add_(group['eps'])

                    # Treat sqrt(r_sq) / sqrt(c_sq) as the A / B factors of an
                    # approximate gradient matrix and smooth them with an EMA.
                    beta1, beta2 = group['betas']
                    eps = group['eps']
                    lr = group['lr']
                    # NOTE(review): exp_avg is read here but never written in
                    # this branch, so for 2D+ params it stays zero and
                    # blended_grad reduces to grad * (1 - beta1) — confirm
                    # whether an exp_avg update was intended (EmoNeco has one).
                    exp_avg = state.setdefault('exp_avg', torch.zeros_like(p))
                    blended_grad = grad.mul(1 - beta1).add_(exp_avg, alpha=beta1)
                    grad_norm = torch.norm(grad, dtype=torch.float32)
                    # Branch selection by emotion scalar.
                    # NOTE(review): the inherited comment said "SoftSign only
                    # when scalar < -0.3, otherwise Cautious", but this code
                    # applies Cautious when scalar < -0.3 and SoftSign in the
                    # default branch (the inverse of sibling EmoNeco) —
                    # confirm which mapping is intended.
                    if 0.3 < scalar <= 0.5:
                        # Norm-scaled SoftSign for moderately "tense" phases.
                        safe_norm = grad_norm + eps
                        modified_grad = softsign(blended_grad) * safe_norm
                        p.add_(-lr * modified_grad)
                    elif scalar < -0.3:
                        direction = blended_grad.sign()          # update direction sign
                        mask = (direction == grad.sign())        # only where it agrees with the raw gradient
                        p.add_(direction * mask, alpha = -lr)    # Cautious update
                    else:
                        p.add_(softsign(blended_grad), alpha = -lr)  # SoftSign update

                    # EMA-smooth the factored statistics (note: smoothed with
                    # beta1, not beta2).
                    state.setdefault('exp_avg_r', torch.zeros_like(r_sq)).mul_(beta1).add_(torch.sqrt(r_sq), alpha=1 - beta1)
                    state.setdefault('exp_avg_c', torch.zeros_like(c_sq)).mul_(beta1).add_(torch.sqrt(c_sq), alpha=1 - beta1)

                    # Normalize by the outer-product-like factored estimate;
                    # this plays the role of a second moment.
                    denom = torch.sqrt(state['exp_avg_r'] * state['exp_avg_c']) + eps

                    # Final update term for this parameter.
                    # NOTE(review): the 2D+ path therefore applies TWO updates
                    # per step (emotion-branch update above plus this
                    # normalized term below) — confirm this is intentional.
                    update_term = grad / denom

                # 1-D (vector) parameters: plain Adam-like moments
                # (combined with the decoupled weight decay below).
                else:
                    exp_avg = state.setdefault('exp_avg', torch.zeros_like(p))
                    exp_avg_sq = state.setdefault('exp_avg_sq', torch.zeros_like(p))
                    beta1, beta2 = group['betas']
                    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    update_term = exp_avg / denom

                # Final parameter update: decoupled weight decay
                # (p *= 1 - wd*lr) followed by the normalized step.
                p.add_(p, alpha=-group['weight_decay'] * group['lr'])
                p.add_(update_term, alpha=-group['lr'])

                # --- early-stop bookkeeping ---
                # Shared scalar history on self.state (not per-parameter);
                # keeps at most 32 entries.
                hist = self.state.setdefault('scalar_hist', [])
                hist.append(scalar)
                if len(hist) >= 33:
                    hist.pop(0)

                # Early-stop decision ("the signal has gone quiet").
                if len(self.state['scalar_hist']) >= 32:
                    buf = self.state['scalar_hist']
                    avg_abs = sum(abs(s) for s in buf) / len(buf)
                    # NOTE(review): despite the name, this is the variance
                    # (not the standard deviation) of the history.
                    std = sum((s - sum(buf)/len(buf))**2 for s in buf) / len(buf)
                    if avg_abs < 0.05 and std < 0.005:
                        self.should_stop = True  # external code may poll this

        return loss
143
+
144
+ """
145
+ https://github.com/muooon/EmoNavi
146
+ Zeal is inspired by Adafactor, and EmoFact,
147
+ and its VRAM-friendly design is something everyone loves.
148
+ """
AMP-compatible/logs/fluctuation_and_accuracy_panel.png ADDED

Git LFS Details

  • SHA256: 2a074a42df6fedc22504de259c8e0428ea65eb248a608467cebc8a2c54138807
  • Pointer size: 131 Bytes
  • Size of remote file: 226 kB
AMP-compatible/logs/loss_comparison_panel.png ADDED

Git LFS Details

  • SHA256: 0bbde9135564fe13e4915305d4c5e81903a4b18732f1e2d493bd78ede0d753d4
  • Pointer size: 131 Bytes
  • Size of remote file: 200 kB
AMP-compatible/logs/trec_gpt2_weight_pca_3panel.png ADDED

Git LFS Details

  • SHA256: 8a4a4dc5c2658499fe5cb88e06c800f5d0de87395bb787ddcbd23b75ae04e9b7
  • Pointer size: 131 Bytes
  • Size of remote file: 206 kB
AMP-compatible/logs/trec_squad_step_accuracy.json ADDED
@@ -0,0 +1,2431 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "TREC": {
3
+ "EmoNAVI": [
4
+ [
5
+ 10,
6
+ 0.274
7
+ ],
8
+ [
9
+ 20,
10
+ 0.258
11
+ ],
12
+ [
13
+ 30,
14
+ 0.336
15
+ ],
16
+ [
17
+ 40,
18
+ 0.374
19
+ ],
20
+ [
21
+ 50,
22
+ 0.332
23
+ ],
24
+ [
25
+ 60,
26
+ 0.422
27
+ ],
28
+ [
29
+ 70,
30
+ 0.308
31
+ ],
32
+ [
33
+ 80,
34
+ 0.168
35
+ ],
36
+ [
37
+ 90,
38
+ 0.422
39
+ ],
40
+ [
41
+ 100,
42
+ 0.32
43
+ ],
44
+ [
45
+ 110,
46
+ 0.494
47
+ ],
48
+ [
49
+ 120,
50
+ 0.31
51
+ ],
52
+ [
53
+ 130,
54
+ 0.344
55
+ ],
56
+ [
57
+ 140,
58
+ 0.474
59
+ ],
60
+ [
61
+ 150,
62
+ 0.37
63
+ ],
64
+ [
65
+ 160,
66
+ 0.402
67
+ ],
68
+ [
69
+ 170,
70
+ 0.58
71
+ ],
72
+ [
73
+ 180,
74
+ 0.474
75
+ ],
76
+ [
77
+ 190,
78
+ 0.516
79
+ ],
80
+ [
81
+ 200,
82
+ 0.518
83
+ ],
84
+ [
85
+ 210,
86
+ 0.422
87
+ ],
88
+ [
89
+ 220,
90
+ 0.522
91
+ ],
92
+ [
93
+ 230,
94
+ 0.564
95
+ ],
96
+ [
97
+ 240,
98
+ 0.51
99
+ ],
100
+ [
101
+ 250,
102
+ 0.672
103
+ ],
104
+ [
105
+ 260,
106
+ 0.548
107
+ ],
108
+ [
109
+ 270,
110
+ 0.57
111
+ ],
112
+ [
113
+ 280,
114
+ 0.538
115
+ ],
116
+ [
117
+ 290,
118
+ 0.508
119
+ ],
120
+ [
121
+ 300,
122
+ 0.672
123
+ ],
124
+ [
125
+ 310,
126
+ 0.606
127
+ ],
128
+ [
129
+ 320,
130
+ 0.6
131
+ ],
132
+ [
133
+ 330,
134
+ 0.65
135
+ ],
136
+ [
137
+ 340,
138
+ 0.662
139
+ ],
140
+ [
141
+ 350,
142
+ 0.652
143
+ ],
144
+ [
145
+ 360,
146
+ 0.67
147
+ ],
148
+ [
149
+ 370,
150
+ 0.632
151
+ ],
152
+ [
153
+ 380,
154
+ 0.634
155
+ ],
156
+ [
157
+ 390,
158
+ 0.61
159
+ ],
160
+ [
161
+ 400,
162
+ 0.7
163
+ ],
164
+ [
165
+ 410,
166
+ 0.67
167
+ ],
168
+ [
169
+ 420,
170
+ 0.704
171
+ ],
172
+ [
173
+ 430,
174
+ 0.692
175
+ ],
176
+ [
177
+ 440,
178
+ 0.724
179
+ ],
180
+ [
181
+ 450,
182
+ 0.724
183
+ ],
184
+ [
185
+ 460,
186
+ 0.722
187
+ ],
188
+ [
189
+ 470,
190
+ 0.668
191
+ ],
192
+ [
193
+ 480,
194
+ 0.694
195
+ ],
196
+ [
197
+ 490,
198
+ 0.744
199
+ ],
200
+ [
201
+ 500,
202
+ 0.732
203
+ ]
204
+ ],
205
+ "EmoFACT": [
206
+ [
207
+ 10,
208
+ 0.246
209
+ ],
210
+ [
211
+ 20,
212
+ 0.228
213
+ ],
214
+ [
215
+ 30,
216
+ 0.282
217
+ ],
218
+ [
219
+ 40,
220
+ 0.328
221
+ ],
222
+ [
223
+ 50,
224
+ 0.234
225
+ ],
226
+ [
227
+ 60,
228
+ 0.326
229
+ ],
230
+ [
231
+ 70,
232
+ 0.332
233
+ ],
234
+ [
235
+ 80,
236
+ 0.51
237
+ ],
238
+ [
239
+ 90,
240
+ 0.308
241
+ ],
242
+ [
243
+ 100,
244
+ 0.332
245
+ ],
246
+ [
247
+ 110,
248
+ 0.3
249
+ ],
250
+ [
251
+ 120,
252
+ 0.478
253
+ ],
254
+ [
255
+ 130,
256
+ 0.36
257
+ ],
258
+ [
259
+ 140,
260
+ 0.44
261
+ ],
262
+ [
263
+ 150,
264
+ 0.466
265
+ ],
266
+ [
267
+ 160,
268
+ 0.396
269
+ ],
270
+ [
271
+ 170,
272
+ 0.408
273
+ ],
274
+ [
275
+ 180,
276
+ 0.414
277
+ ],
278
+ [
279
+ 190,
280
+ 0.376
281
+ ],
282
+ [
283
+ 200,
284
+ 0.448
285
+ ],
286
+ [
287
+ 210,
288
+ 0.354
289
+ ],
290
+ [
291
+ 220,
292
+ 0.426
293
+ ],
294
+ [
295
+ 230,
296
+ 0.492
297
+ ],
298
+ [
299
+ 240,
300
+ 0.558
301
+ ],
302
+ [
303
+ 250,
304
+ 0.488
305
+ ],
306
+ [
307
+ 260,
308
+ 0.434
309
+ ],
310
+ [
311
+ 270,
312
+ 0.564
313
+ ],
314
+ [
315
+ 280,
316
+ 0.394
317
+ ],
318
+ [
319
+ 290,
320
+ 0.524
321
+ ],
322
+ [
323
+ 300,
324
+ 0.472
325
+ ],
326
+ [
327
+ 310,
328
+ 0.498
329
+ ],
330
+ [
331
+ 320,
332
+ 0.618
333
+ ],
334
+ [
335
+ 330,
336
+ 0.588
337
+ ],
338
+ [
339
+ 340,
340
+ 0.528
341
+ ],
342
+ [
343
+ 350,
344
+ 0.594
345
+ ],
346
+ [
347
+ 360,
348
+ 0.452
349
+ ],
350
+ [
351
+ 370,
352
+ 0.562
353
+ ],
354
+ [
355
+ 380,
356
+ 0.566
357
+ ],
358
+ [
359
+ 390,
360
+ 0.696
361
+ ],
362
+ [
363
+ 400,
364
+ 0.592
365
+ ],
366
+ [
367
+ 410,
368
+ 0.33
369
+ ],
370
+ [
371
+ 420,
372
+ 0.546
373
+ ],
374
+ [
375
+ 430,
376
+ 0.598
377
+ ],
378
+ [
379
+ 440,
380
+ 0.66
381
+ ],
382
+ [
383
+ 450,
384
+ 0.584
385
+ ],
386
+ [
387
+ 460,
388
+ 0.652
389
+ ],
390
+ [
391
+ 470,
392
+ 0.686
393
+ ],
394
+ [
395
+ 480,
396
+ 0.65
397
+ ],
398
+ [
399
+ 490,
400
+ 0.7
401
+ ],
402
+ [
403
+ 500,
404
+ 0.716
405
+ ]
406
+ ],
407
+ "EmoZEAL": [
408
+ [
409
+ 10,
410
+ 0.248
411
+ ],
412
+ [
413
+ 20,
414
+ 0.268
415
+ ],
416
+ [
417
+ 30,
418
+ 0.348
419
+ ],
420
+ [
421
+ 40,
422
+ 0.302
423
+ ],
424
+ [
425
+ 50,
426
+ 0.332
427
+ ],
428
+ [
429
+ 60,
430
+ 0.31
431
+ ],
432
+ [
433
+ 70,
434
+ 0.236
435
+ ],
436
+ [
437
+ 80,
438
+ 0.16
439
+ ],
440
+ [
441
+ 90,
442
+ 0.35
443
+ ],
444
+ [
445
+ 100,
446
+ 0.244
447
+ ],
448
+ [
449
+ 110,
450
+ 0.318
451
+ ],
452
+ [
453
+ 120,
454
+ 0.312
455
+ ],
456
+ [
457
+ 130,
458
+ 0.54
459
+ ],
460
+ [
461
+ 140,
462
+ 0.418
463
+ ],
464
+ [
465
+ 150,
466
+ 0.43
467
+ ],
468
+ [
469
+ 160,
470
+ 0.352
471
+ ],
472
+ [
473
+ 170,
474
+ 0.47
475
+ ],
476
+ [
477
+ 180,
478
+ 0.538
479
+ ],
480
+ [
481
+ 190,
482
+ 0.444
483
+ ],
484
+ [
485
+ 200,
486
+ 0.486
487
+ ],
488
+ [
489
+ 210,
490
+ 0.596
491
+ ],
492
+ [
493
+ 220,
494
+ 0.414
495
+ ],
496
+ [
497
+ 230,
498
+ 0.384
499
+ ],
500
+ [
501
+ 240,
502
+ 0.67
503
+ ],
504
+ [
505
+ 250,
506
+ 0.344
507
+ ],
508
+ [
509
+ 260,
510
+ 0.398
511
+ ],
512
+ [
513
+ 270,
514
+ 0.55
515
+ ],
516
+ [
517
+ 280,
518
+ 0.612
519
+ ],
520
+ [
521
+ 290,
522
+ 0.616
523
+ ],
524
+ [
525
+ 300,
526
+ 0.488
527
+ ],
528
+ [
529
+ 310,
530
+ 0.46
531
+ ],
532
+ [
533
+ 320,
534
+ 0.53
535
+ ],
536
+ [
537
+ 330,
538
+ 0.582
539
+ ],
540
+ [
541
+ 340,
542
+ 0.376
543
+ ],
544
+ [
545
+ 350,
546
+ 0.66
547
+ ],
548
+ [
549
+ 360,
550
+ 0.682
551
+ ],
552
+ [
553
+ 370,
554
+ 0.642
555
+ ],
556
+ [
557
+ 380,
558
+ 0.686
559
+ ],
560
+ [
561
+ 390,
562
+ 0.612
563
+ ],
564
+ [
565
+ 400,
566
+ 0.496
567
+ ],
568
+ [
569
+ 410,
570
+ 0.572
571
+ ],
572
+ [
573
+ 420,
574
+ 0.58
575
+ ],
576
+ [
577
+ 430,
578
+ 0.684
579
+ ],
580
+ [
581
+ 440,
582
+ 0.72
583
+ ],
584
+ [
585
+ 450,
586
+ 0.624
587
+ ],
588
+ [
589
+ 460,
590
+ 0.616
591
+ ],
592
+ [
593
+ 470,
594
+ 0.718
595
+ ],
596
+ [
597
+ 480,
598
+ 0.71
599
+ ],
600
+ [
601
+ 490,
602
+ 0.696
603
+ ],
604
+ [
605
+ 500,
606
+ 0.658
607
+ ]
608
+ ],
609
+ "EmoLYNX": [
610
+ [
611
+ 10,
612
+ 0.22
613
+ ],
614
+ [
615
+ 20,
616
+ 0.228
617
+ ],
618
+ [
619
+ 30,
620
+ 0.242
621
+ ],
622
+ [
623
+ 40,
624
+ 0.222
625
+ ],
626
+ [
627
+ 50,
628
+ 0.398
629
+ ],
630
+ [
631
+ 60,
632
+ 0.278
633
+ ],
634
+ [
635
+ 70,
636
+ 0.298
637
+ ],
638
+ [
639
+ 80,
640
+ 0.312
641
+ ],
642
+ [
643
+ 90,
644
+ 0.254
645
+ ],
646
+ [
647
+ 100,
648
+ 0.306
649
+ ],
650
+ [
651
+ 110,
652
+ 0.484
653
+ ],
654
+ [
655
+ 120,
656
+ 0.336
657
+ ],
658
+ [
659
+ 130,
660
+ 0.31
661
+ ],
662
+ [
663
+ 140,
664
+ 0.33
665
+ ],
666
+ [
667
+ 150,
668
+ 0.376
669
+ ],
670
+ [
671
+ 160,
672
+ 0.354
673
+ ],
674
+ [
675
+ 170,
676
+ 0.394
677
+ ],
678
+ [
679
+ 180,
680
+ 0.558
681
+ ],
682
+ [
683
+ 190,
684
+ 0.566
685
+ ],
686
+ [
687
+ 200,
688
+ 0.578
689
+ ],
690
+ [
691
+ 210,
692
+ 0.548
693
+ ],
694
+ [
695
+ 220,
696
+ 0.53
697
+ ],
698
+ [
699
+ 230,
700
+ 0.57
701
+ ],
702
+ [
703
+ 240,
704
+ 0.594
705
+ ],
706
+ [
707
+ 250,
708
+ 0.596
709
+ ],
710
+ [
711
+ 260,
712
+ 0.64
713
+ ],
714
+ [
715
+ 270,
716
+ 0.6
717
+ ],
718
+ [
719
+ 280,
720
+ 0.544
721
+ ],
722
+ [
723
+ 290,
724
+ 0.464
725
+ ],
726
+ [
727
+ 300,
728
+ 0.548
729
+ ],
730
+ [
731
+ 310,
732
+ 0.622
733
+ ],
734
+ [
735
+ 320,
736
+ 0.678
737
+ ],
738
+ [
739
+ 330,
740
+ 0.658
741
+ ],
742
+ [
743
+ 340,
744
+ 0.658
745
+ ],
746
+ [
747
+ 350,
748
+ 0.686
749
+ ],
750
+ [
751
+ 360,
752
+ 0.694
753
+ ],
754
+ [
755
+ 370,
756
+ 0.686
757
+ ],
758
+ [
759
+ 380,
760
+ 0.61
761
+ ],
762
+ [
763
+ 390,
764
+ 0.606
765
+ ],
766
+ [
767
+ 400,
768
+ 0.666
769
+ ],
770
+ [
771
+ 410,
772
+ 0.68
773
+ ],
774
+ [
775
+ 420,
776
+ 0.638
777
+ ],
778
+ [
779
+ 430,
780
+ 0.626
781
+ ],
782
+ [
783
+ 440,
784
+ 0.546
785
+ ],
786
+ [
787
+ 450,
788
+ 0.49
789
+ ],
790
+ [
791
+ 460,
792
+ 0.508
793
+ ],
794
+ [
795
+ 470,
796
+ 0.546
797
+ ],
798
+ [
799
+ 480,
800
+ 0.562
801
+ ],
802
+ [
803
+ 490,
804
+ 0.62
805
+ ],
806
+ [
807
+ 500,
808
+ 0.624
809
+ ]
810
+ ],
811
+ "EmoNECO": [
812
+ [
813
+ 10,
814
+ 0.164
815
+ ],
816
+ [
817
+ 20,
818
+ 0.198
819
+ ],
820
+ [
821
+ 30,
822
+ 0.29
823
+ ],
824
+ [
825
+ 40,
826
+ 0.372
827
+ ],
828
+ [
829
+ 50,
830
+ 0.236
831
+ ],
832
+ [
833
+ 60,
834
+ 0.384
835
+ ],
836
+ [
837
+ 70,
838
+ 0.23
839
+ ],
840
+ [
841
+ 80,
842
+ 0.406
843
+ ],
844
+ [
845
+ 90,
846
+ 0.476
847
+ ],
848
+ [
849
+ 100,
850
+ 0.226
851
+ ],
852
+ [
853
+ 110,
854
+ 0.398
855
+ ],
856
+ [
857
+ 120,
858
+ 0.234
859
+ ],
860
+ [
861
+ 130,
862
+ 0.37
863
+ ],
864
+ [
865
+ 140,
866
+ 0.568
867
+ ],
868
+ [
869
+ 150,
870
+ 0.554
871
+ ],
872
+ [
873
+ 160,
874
+ 0.542
875
+ ],
876
+ [
877
+ 170,
878
+ 0.578
879
+ ],
880
+ [
881
+ 180,
882
+ 0.488
883
+ ],
884
+ [
885
+ 190,
886
+ 0.574
887
+ ],
888
+ [
889
+ 200,
890
+ 0.58
891
+ ],
892
+ [
893
+ 210,
894
+ 0.616
895
+ ],
896
+ [
897
+ 220,
898
+ 0.59
899
+ ],
900
+ [
901
+ 230,
902
+ 0.612
903
+ ],
904
+ [
905
+ 240,
906
+ 0.622
907
+ ],
908
+ [
909
+ 250,
910
+ 0.624
911
+ ],
912
+ [
913
+ 260,
914
+ 0.684
915
+ ],
916
+ [
917
+ 270,
918
+ 0.63
919
+ ],
920
+ [
921
+ 280,
922
+ 0.694
923
+ ],
924
+ [
925
+ 290,
926
+ 0.632
927
+ ],
928
+ [
929
+ 300,
930
+ 0.66
931
+ ],
932
+ [
933
+ 310,
934
+ 0.708
935
+ ],
936
+ [
937
+ 320,
938
+ 0.552
939
+ ],
940
+ [
941
+ 330,
942
+ 0.622
943
+ ],
944
+ [
945
+ 340,
946
+ 0.676
947
+ ],
948
+ [
949
+ 350,
950
+ 0.732
951
+ ],
952
+ [
953
+ 360,
954
+ 0.702
955
+ ],
956
+ [
957
+ 370,
958
+ 0.646
959
+ ],
960
+ [
961
+ 380,
962
+ 0.726
963
+ ],
964
+ [
965
+ 390,
966
+ 0.758
967
+ ],
968
+ [
969
+ 400,
970
+ 0.73
971
+ ],
972
+ [
973
+ 410,
974
+ 0.698
975
+ ],
976
+ [
977
+ 420,
978
+ 0.754
979
+ ],
980
+ [
981
+ 430,
982
+ 0.732
983
+ ],
984
+ [
985
+ 440,
986
+ 0.666
987
+ ],
988
+ [
989
+ 450,
990
+ 0.756
991
+ ],
992
+ [
993
+ 460,
994
+ 0.728
995
+ ],
996
+ [
997
+ 470,
998
+ 0.74
999
+ ],
1000
+ [
1001
+ 480,
1002
+ 0.776
1003
+ ],
1004
+ [
1005
+ 490,
1006
+ 0.73
1007
+ ],
1008
+ [
1009
+ 500,
1010
+ 0.668
1011
+ ]
1012
+ ],
1013
+ "EmoCLAN": [
1014
+ [
1015
+ 10,
1016
+ 0.15
1017
+ ],
1018
+ [
1019
+ 20,
1020
+ 0.228
1021
+ ],
1022
+ [
1023
+ 30,
1024
+ 0.226
1025
+ ],
1026
+ [
1027
+ 40,
1028
+ 0.166
1029
+ ],
1030
+ [
1031
+ 50,
1032
+ 0.312
1033
+ ],
1034
+ [
1035
+ 60,
1036
+ 0.284
1037
+ ],
1038
+ [
1039
+ 70,
1040
+ 0.338
1041
+ ],
1042
+ [
1043
+ 80,
1044
+ 0.466
1045
+ ],
1046
+ [
1047
+ 90,
1048
+ 0.288
1049
+ ],
1050
+ [
1051
+ 100,
1052
+ 0.326
1053
+ ],
1054
+ [
1055
+ 110,
1056
+ 0.298
1057
+ ],
1058
+ [
1059
+ 120,
1060
+ 0.406
1061
+ ],
1062
+ [
1063
+ 130,
1064
+ 0.322
1065
+ ],
1066
+ [
1067
+ 140,
1068
+ 0.392
1069
+ ],
1070
+ [
1071
+ 150,
1072
+ 0.378
1073
+ ],
1074
+ [
1075
+ 160,
1076
+ 0.372
1077
+ ],
1078
+ [
1079
+ 170,
1080
+ 0.428
1081
+ ],
1082
+ [
1083
+ 180,
1084
+ 0.364
1085
+ ],
1086
+ [
1087
+ 190,
1088
+ 0.472
1089
+ ],
1090
+ [
1091
+ 200,
1092
+ 0.47
1093
+ ],
1094
+ [
1095
+ 210,
1096
+ 0.494
1097
+ ],
1098
+ [
1099
+ 220,
1100
+ 0.528
1101
+ ],
1102
+ [
1103
+ 230,
1104
+ 0.528
1105
+ ],
1106
+ [
1107
+ 240,
1108
+ 0.544
1109
+ ],
1110
+ [
1111
+ 250,
1112
+ 0.57
1113
+ ],
1114
+ [
1115
+ 260,
1116
+ 0.57
1117
+ ],
1118
+ [
1119
+ 270,
1120
+ 0.588
1121
+ ],
1122
+ [
1123
+ 280,
1124
+ 0.564
1125
+ ],
1126
+ [
1127
+ 290,
1128
+ 0.608
1129
+ ],
1130
+ [
1131
+ 300,
1132
+ 0.548
1133
+ ],
1134
+ [
1135
+ 310,
1136
+ 0.578
1137
+ ],
1138
+ [
1139
+ 320,
1140
+ 0.594
1141
+ ],
1142
+ [
1143
+ 330,
1144
+ 0.7
1145
+ ],
1146
+ [
1147
+ 340,
1148
+ 0.688
1149
+ ],
1150
+ [
1151
+ 350,
1152
+ 0.672
1153
+ ],
1154
+ [
1155
+ 360,
1156
+ 0.692
1157
+ ],
1158
+ [
1159
+ 370,
1160
+ 0.46
1161
+ ],
1162
+ [
1163
+ 380,
1164
+ 0.586
1165
+ ],
1166
+ [
1167
+ 390,
1168
+ 0.588
1169
+ ],
1170
+ [
1171
+ 400,
1172
+ 0.636
1173
+ ],
1174
+ [
1175
+ 410,
1176
+ 0.632
1177
+ ],
1178
+ [
1179
+ 420,
1180
+ 0.668
1181
+ ],
1182
+ [
1183
+ 430,
1184
+ 0.684
1185
+ ],
1186
+ [
1187
+ 440,
1188
+ 0.662
1189
+ ],
1190
+ [
1191
+ 450,
1192
+ 0.602
1193
+ ],
1194
+ [
1195
+ 460,
1196
+ 0.572
1197
+ ],
1198
+ [
1199
+ 470,
1200
+ 0.57
1201
+ ],
1202
+ [
1203
+ 480,
1204
+ 0.616
1205
+ ],
1206
+ [
1207
+ 490,
1208
+ 0.668
1209
+ ],
1210
+ [
1211
+ 500,
1212
+ 0.614
1213
+ ]
1214
+ ]
1215
+ },
1216
+ "SQuAD_Tiny": {},
1217
+ "GPT2": {
1218
+ "EmoNAVI": [
1219
+ [
1220
+ 10,
1221
+ 390.03
1222
+ ],
1223
+ [
1224
+ 20,
1225
+ 389.7
1226
+ ],
1227
+ [
1228
+ 30,
1229
+ 144.35
1230
+ ],
1231
+ [
1232
+ 40,
1233
+ 126.51
1234
+ ],
1235
+ [
1236
+ 50,
1237
+ 128.2
1238
+ ],
1239
+ [
1240
+ 60,
1241
+ 109.94
1242
+ ],
1243
+ [
1244
+ 70,
1245
+ 91.23
1246
+ ],
1247
+ [
1248
+ 80,
1249
+ 87.17
1250
+ ],
1251
+ [
1252
+ 90,
1253
+ 98.43
1254
+ ],
1255
+ [
1256
+ 100,
1257
+ 86.26
1258
+ ],
1259
+ [
1260
+ 110,
1261
+ 73.17
1262
+ ],
1263
+ [
1264
+ 120,
1265
+ 68.45
1266
+ ],
1267
+ [
1268
+ 130,
1269
+ 72.42
1270
+ ],
1271
+ [
1272
+ 140,
1273
+ 59.25
1274
+ ],
1275
+ [
1276
+ 150,
1277
+ 76.23
1278
+ ],
1279
+ [
1280
+ 160,
1281
+ 84.56
1282
+ ],
1283
+ [
1284
+ 170,
1285
+ 57.77
1286
+ ],
1287
+ [
1288
+ 180,
1289
+ 60.9
1290
+ ],
1291
+ [
1292
+ 190,
1293
+ 59.26
1294
+ ],
1295
+ [
1296
+ 200,
1297
+ 54.84
1298
+ ],
1299
+ [
1300
+ 210,
1301
+ 52.89
1302
+ ],
1303
+ [
1304
+ 220,
1305
+ 55.69
1306
+ ],
1307
+ [
1308
+ 230,
1309
+ 52.2
1310
+ ],
1311
+ [
1312
+ 240,
1313
+ 45.29
1314
+ ],
1315
+ [
1316
+ 250,
1317
+ 50.06
1318
+ ],
1319
+ [
1320
+ 260,
1321
+ 43.19
1322
+ ],
1323
+ [
1324
+ 270,
1325
+ 43.22
1326
+ ],
1327
+ [
1328
+ 280,
1329
+ 46.27
1330
+ ],
1331
+ [
1332
+ 290,
1333
+ 42.59
1334
+ ],
1335
+ [
1336
+ 300,
1337
+ 41.59
1338
+ ],
1339
+ [
1340
+ 310,
1341
+ 48.33
1342
+ ],
1343
+ [
1344
+ 320,
1345
+ 41.03
1346
+ ],
1347
+ [
1348
+ 330,
1349
+ 31.07
1350
+ ],
1351
+ [
1352
+ 340,
1353
+ 31.38
1354
+ ],
1355
+ [
1356
+ 350,
1357
+ 29.13
1358
+ ],
1359
+ [
1360
+ 360,
1361
+ 25.73
1362
+ ],
1363
+ [
1364
+ 370,
1365
+ 22.76
1366
+ ],
1367
+ [
1368
+ 380,
1369
+ 25.37
1370
+ ],
1371
+ [
1372
+ 390,
1373
+ 28.14
1374
+ ],
1375
+ [
1376
+ 400,
1377
+ 31.61
1378
+ ],
1379
+ [
1380
+ 410,
1381
+ 25.59
1382
+ ],
1383
+ [
1384
+ 420,
1385
+ 19.69
1386
+ ],
1387
+ [
1388
+ 430,
1389
+ 14.81
1390
+ ],
1391
+ [
1392
+ 440,
1393
+ 14.93
1394
+ ],
1395
+ [
1396
+ 450,
1397
+ 16.52
1398
+ ],
1399
+ [
1400
+ 460,
1401
+ 15.26
1402
+ ],
1403
+ [
1404
+ 470,
1405
+ 14.09
1406
+ ],
1407
+ [
1408
+ 480,
1409
+ 18.8
1410
+ ],
1411
+ [
1412
+ 490,
1413
+ 11.57
1414
+ ],
1415
+ [
1416
+ 500,
1417
+ 9.65
1418
+ ]
1419
+ ],
1420
+ "EmoFACT": [
1421
+ [
1422
+ 10,
1423
+ 415036.56
1424
+ ],
1425
+ [
1426
+ 20,
1427
+ 1082.92
1428
+ ],
1429
+ [
1430
+ 30,
1431
+ 173.88
1432
+ ],
1433
+ [
1434
+ 40,
1435
+ 180.84
1436
+ ],
1437
+ [
1438
+ 50,
1439
+ 134.73
1440
+ ],
1441
+ [
1442
+ 60,
1443
+ 119.16
1444
+ ],
1445
+ [
1446
+ 70,
1447
+ 112.98
1448
+ ],
1449
+ [
1450
+ 80,
1451
+ 114.38
1452
+ ],
1453
+ [
1454
+ 90,
1455
+ 95.07
1456
+ ],
1457
+ [
1458
+ 100,
1459
+ 95.62
1460
+ ],
1461
+ [
1462
+ 110,
1463
+ 159.77
1464
+ ],
1465
+ [
1466
+ 120,
1467
+ 122.98
1468
+ ],
1469
+ [
1470
+ 130,
1471
+ 133.86
1472
+ ],
1473
+ [
1474
+ 140,
1475
+ 109.73
1476
+ ],
1477
+ [
1478
+ 150,
1479
+ 287.33
1480
+ ],
1481
+ [
1482
+ 160,
1483
+ 117.7
1484
+ ],
1485
+ [
1486
+ 170,
1487
+ 112.34
1488
+ ],
1489
+ [
1490
+ 180,
1491
+ 97.98
1492
+ ],
1493
+ [
1494
+ 190,
1495
+ 103.99
1496
+ ],
1497
+ [
1498
+ 200,
1499
+ 133.38
1500
+ ],
1501
+ [
1502
+ 210,
1503
+ 94.22
1504
+ ],
1505
+ [
1506
+ 220,
1507
+ 90.21
1508
+ ],
1509
+ [
1510
+ 230,
1511
+ 113.93
1512
+ ],
1513
+ [
1514
+ 240,
1515
+ 77.51
1516
+ ],
1517
+ [
1518
+ 250,
1519
+ 159.87
1520
+ ],
1521
+ [
1522
+ 260,
1523
+ 85.87
1524
+ ],
1525
+ [
1526
+ 270,
1527
+ 91.0
1528
+ ],
1529
+ [
1530
+ 280,
1531
+ 79.88
1532
+ ],
1533
+ [
1534
+ 290,
1535
+ 80.98
1536
+ ],
1537
+ [
1538
+ 300,
1539
+ 67.46
1540
+ ],
1541
+ [
1542
+ 310,
1543
+ 69.69
1544
+ ],
1545
+ [
1546
+ 320,
1547
+ 65.7
1548
+ ],
1549
+ [
1550
+ 330,
1551
+ 54.08
1552
+ ],
1553
+ [
1554
+ 340,
1555
+ 48.44
1556
+ ],
1557
+ [
1558
+ 350,
1559
+ 92.18
1560
+ ],
1561
+ [
1562
+ 360,
1563
+ 46.75
1564
+ ],
1565
+ [
1566
+ 370,
1567
+ 54.9
1568
+ ],
1569
+ [
1570
+ 380,
1571
+ 41.72
1572
+ ],
1573
+ [
1574
+ 390,
1575
+ 40.68
1576
+ ],
1577
+ [
1578
+ 400,
1579
+ 36.56
1580
+ ],
1581
+ [
1582
+ 410,
1583
+ 38.22
1584
+ ],
1585
+ [
1586
+ 420,
1587
+ 40.82
1588
+ ],
1589
+ [
1590
+ 430,
1591
+ 27.42
1592
+ ],
1593
+ [
1594
+ 440,
1595
+ 3203.09
1596
+ ],
1597
+ [
1598
+ 450,
1599
+ 29.91
1600
+ ],
1601
+ [
1602
+ 460,
1603
+ 30.59
1604
+ ],
1605
+ [
1606
+ 470,
1607
+ 32.2
1608
+ ],
1609
+ [
1610
+ 480,
1611
+ 40.88
1612
+ ],
1613
+ [
1614
+ 490,
1615
+ 36.93
1616
+ ],
1617
+ [
1618
+ 500,
1619
+ 35.41
1620
+ ]
1621
+ ],
1622
+ "EmoZEAL": [
1623
+ [
1624
+ 10,
1625
+ 32368984.0
1626
+ ],
1627
+ [
1628
+ 20,
1629
+ 864642.69
1630
+ ],
1631
+ [
1632
+ 30,
1633
+ 702.81
1634
+ ],
1635
+ [
1636
+ 40,
1637
+ 6212.83
1638
+ ],
1639
+ [
1640
+ 50,
1641
+ 418.3
1642
+ ],
1643
+ [
1644
+ 60,
1645
+ 125.87
1646
+ ],
1647
+ [
1648
+ 70,
1649
+ 113.59
1650
+ ],
1651
+ [
1652
+ 80,
1653
+ 115.08
1654
+ ],
1655
+ [
1656
+ 90,
1657
+ 128.86
1658
+ ],
1659
+ [
1660
+ 100,
1661
+ 126.81
1662
+ ],
1663
+ [
1664
+ 110,
1665
+ 219.04
1666
+ ],
1667
+ [
1668
+ 120,
1669
+ 129.47
1670
+ ],
1671
+ [
1672
+ 130,
1673
+ 101.47
1674
+ ],
1675
+ [
1676
+ 140,
1677
+ 104.58
1678
+ ],
1679
+ [
1680
+ 150,
1681
+ 137.48
1682
+ ],
1683
+ [
1684
+ 160,
1685
+ 111.11
1686
+ ],
1687
+ [
1688
+ 170,
1689
+ 125.97
1690
+ ],
1691
+ [
1692
+ 180,
1693
+ 91.28
1694
+ ],
1695
+ [
1696
+ 190,
1697
+ 104.27
1698
+ ],
1699
+ [
1700
+ 200,
1701
+ 86.7
1702
+ ],
1703
+ [
1704
+ 210,
1705
+ 130.79
1706
+ ],
1707
+ [
1708
+ 220,
1709
+ 96.5
1710
+ ],
1711
+ [
1712
+ 230,
1713
+ 88.04
1714
+ ],
1715
+ [
1716
+ 240,
1717
+ 94.72
1718
+ ],
1719
+ [
1720
+ 250,
1721
+ 86.83
1722
+ ],
1723
+ [
1724
+ 260,
1725
+ 105.92
1726
+ ],
1727
+ [
1728
+ 270,
1729
+ 100.96
1730
+ ],
1731
+ [
1732
+ 280,
1733
+ 92.68
1734
+ ],
1735
+ [
1736
+ 290,
1737
+ 93.79
1738
+ ],
1739
+ [
1740
+ 300,
1741
+ 90.26
1742
+ ],
1743
+ [
1744
+ 310,
1745
+ 81.71
1746
+ ],
1747
+ [
1748
+ 320,
1749
+ 62.94
1750
+ ],
1751
+ [
1752
+ 330,
1753
+ 84.68
1754
+ ],
1755
+ [
1756
+ 340,
1757
+ 72.64
1758
+ ],
1759
+ [
1760
+ 350,
1761
+ 88.21
1762
+ ],
1763
+ [
1764
+ 360,
1765
+ 72.25
1766
+ ],
1767
+ [
1768
+ 370,
1769
+ 60.64
1770
+ ],
1771
+ [
1772
+ 380,
1773
+ 65.93
1774
+ ],
1775
+ [
1776
+ 390,
1777
+ 53.45
1778
+ ],
1779
+ [
1780
+ 400,
1781
+ 59.47
1782
+ ],
1783
+ [
1784
+ 410,
1785
+ 59.57
1786
+ ],
1787
+ [
1788
+ 420,
1789
+ 54.25
1790
+ ],
1791
+ [
1792
+ 430,
1793
+ 50.96
1794
+ ],
1795
+ [
1796
+ 440,
1797
+ 48.73
1798
+ ],
1799
+ [
1800
+ 450,
1801
+ 49.77
1802
+ ],
1803
+ [
1804
+ 460,
1805
+ 45.83
1806
+ ],
1807
+ [
1808
+ 470,
1809
+ 59.98
1810
+ ],
1811
+ [
1812
+ 480,
1813
+ 55.26
1814
+ ],
1815
+ [
1816
+ 490,
1817
+ 37.23
1818
+ ],
1819
+ [
1820
+ 500,
1821
+ 33.99
1822
+ ]
1823
+ ],
1824
+ "EmoLYNX": [
1825
+ [
1826
+ 10,
1827
+ 176.55
1828
+ ],
1829
+ [
1830
+ 20,
1831
+ 78.81
1832
+ ],
1833
+ [
1834
+ 30,
1835
+ 51.84
1836
+ ],
1837
+ [
1838
+ 40,
1839
+ 61.16
1840
+ ],
1841
+ [
1842
+ 50,
1843
+ 57.48
1844
+ ],
1845
+ [
1846
+ 60,
1847
+ 61.37
1848
+ ],
1849
+ [
1850
+ 70,
1851
+ 55.12
1852
+ ],
1853
+ [
1854
+ 80,
1855
+ 58.97
1856
+ ],
1857
+ [
1858
+ 90,
1859
+ 54.63
1860
+ ],
1861
+ [
1862
+ 100,
1863
+ 61.92
1864
+ ],
1865
+ [
1866
+ 110,
1867
+ 66.47
1868
+ ],
1869
+ [
1870
+ 120,
1871
+ 51.53
1872
+ ],
1873
+ [
1874
+ 130,
1875
+ 45.01
1876
+ ],
1877
+ [
1878
+ 140,
1879
+ 45.66
1880
+ ],
1881
+ [
1882
+ 150,
1883
+ 34.53
1884
+ ],
1885
+ [
1886
+ 160,
1887
+ 42.89
1888
+ ],
1889
+ [
1890
+ 170,
1891
+ 42.98
1892
+ ],
1893
+ [
1894
+ 180,
1895
+ 42.43
1896
+ ],
1897
+ [
1898
+ 190,
1899
+ 34.23
1900
+ ],
1901
+ [
1902
+ 200,
1903
+ 30.79
1904
+ ],
1905
+ [
1906
+ 210,
1907
+ 36.44
1908
+ ],
1909
+ [
1910
+ 220,
1911
+ 33.72
1912
+ ],
1913
+ [
1914
+ 230,
1915
+ 31.61
1916
+ ],
1917
+ [
1918
+ 240,
1919
+ 33.06
1920
+ ],
1921
+ [
1922
+ 250,
1923
+ 37.53
1924
+ ],
1925
+ [
1926
+ 260,
1927
+ 43.56
1928
+ ],
1929
+ [
1930
+ 270,
1931
+ 31.7
1932
+ ],
1933
+ [
1934
+ 280,
1935
+ 27.2
1936
+ ],
1937
+ [
1938
+ 290,
1939
+ 26.43
1940
+ ],
1941
+ [
1942
+ 300,
1943
+ 31.83
1944
+ ],
1945
+ [
1946
+ 310,
1947
+ 45.07
1948
+ ],
1949
+ [
1950
+ 320,
1951
+ 30.65
1952
+ ],
1953
+ [
1954
+ 330,
1955
+ 23.93
1956
+ ],
1957
+ [
1958
+ 340,
1959
+ 26.46
1960
+ ],
1961
+ [
1962
+ 350,
1963
+ 23.51
1964
+ ],
1965
+ [
1966
+ 360,
1967
+ 28.75
1968
+ ],
1969
+ [
1970
+ 370,
1971
+ 40.6
1972
+ ],
1973
+ [
1974
+ 380,
1975
+ 36.43
1976
+ ],
1977
+ [
1978
+ 390,
1979
+ 31.47
1980
+ ],
1981
+ [
1982
+ 400,
1983
+ 57.82
1984
+ ],
1985
+ [
1986
+ 410,
1987
+ 30.0
1988
+ ],
1989
+ [
1990
+ 420,
1991
+ 30.81
1992
+ ],
1993
+ [
1994
+ 430,
1995
+ 32.15
1996
+ ],
1997
+ [
1998
+ 440,
1999
+ 24.29
2000
+ ],
2001
+ [
2002
+ 450,
2003
+ 27.99
2004
+ ],
2005
+ [
2006
+ 460,
2007
+ 25.83
2008
+ ],
2009
+ [
2010
+ 470,
2011
+ 24.17
2012
+ ],
2013
+ [
2014
+ 480,
2015
+ 24.79
2016
+ ],
2017
+ [
2018
+ 490,
2019
+ 26.67
2020
+ ],
2021
+ [
2022
+ 500,
2023
+ 31.57
2024
+ ]
2025
+ ],
2026
+ "EmoNECO": [
2027
+ [
2028
+ 10,
2029
+ 43.63
2030
+ ],
2031
+ [
2032
+ 20,
2033
+ 41.81
2034
+ ],
2035
+ [
2036
+ 30,
2037
+ 29.89
2038
+ ],
2039
+ [
2040
+ 40,
2041
+ 21.48
2042
+ ],
2043
+ [
2044
+ 50,
2045
+ 34.85
2046
+ ],
2047
+ [
2048
+ 60,
2049
+ 35.27
2050
+ ],
2051
+ [
2052
+ 70,
2053
+ 20.74
2054
+ ],
2055
+ [
2056
+ 80,
2057
+ 22.34
2058
+ ],
2059
+ [
2060
+ 90,
2061
+ 22.61
2062
+ ],
2063
+ [
2064
+ 100,
2065
+ 17.34
2066
+ ],
2067
+ [
2068
+ 110,
2069
+ 20.47
2070
+ ],
2071
+ [
2072
+ 120,
2073
+ 30.86
2074
+ ],
2075
+ [
2076
+ 130,
2077
+ 19.76
2078
+ ],
2079
+ [
2080
+ 140,
2081
+ 18.27
2082
+ ],
2083
+ [
2084
+ 150,
2085
+ 17.08
2086
+ ],
2087
+ [
2088
+ 160,
2089
+ 17.37
2090
+ ],
2091
+ [
2092
+ 170,
2093
+ 19.73
2094
+ ],
2095
+ [
2096
+ 180,
2097
+ 18.19
2098
+ ],
2099
+ [
2100
+ 190,
2101
+ 17.4
2102
+ ],
2103
+ [
2104
+ 200,
2105
+ 12.44
2106
+ ],
2107
+ [
2108
+ 210,
2109
+ 9.06
2110
+ ],
2111
+ [
2112
+ 220,
2113
+ 7.12
2114
+ ],
2115
+ [
2116
+ 230,
2117
+ 6.33
2118
+ ],
2119
+ [
2120
+ 240,
2121
+ 7.67
2122
+ ],
2123
+ [
2124
+ 250,
2125
+ 5.18
2126
+ ],
2127
+ [
2128
+ 260,
2129
+ 5.39
2130
+ ],
2131
+ [
2132
+ 270,
2133
+ 5.6
2134
+ ],
2135
+ [
2136
+ 280,
2137
+ 5.41
2138
+ ],
2139
+ [
2140
+ 290,
2141
+ 5.32
2142
+ ],
2143
+ [
2144
+ 300,
2145
+ 5.23
2146
+ ],
2147
+ [
2148
+ 310,
2149
+ 4.38
2150
+ ],
2151
+ [
2152
+ 320,
2153
+ 4.73
2154
+ ],
2155
+ [
2156
+ 330,
2157
+ 4.98
2158
+ ],
2159
+ [
2160
+ 340,
2161
+ 5.48
2162
+ ],
2163
+ [
2164
+ 350,
2165
+ 5.61
2166
+ ],
2167
+ [
2168
+ 360,
2169
+ 4.57
2170
+ ],
2171
+ [
2172
+ 370,
2173
+ 4.24
2174
+ ],
2175
+ [
2176
+ 380,
2177
+ 4.71
2178
+ ],
2179
+ [
2180
+ 390,
2181
+ 3.63
2182
+ ],
2183
+ [
2184
+ 400,
2185
+ 3.62
2186
+ ],
2187
+ [
2188
+ 410,
2189
+ 2.83
2190
+ ],
2191
+ [
2192
+ 420,
2193
+ 2.96
2194
+ ],
2195
+ [
2196
+ 430,
2197
+ 2.78
2198
+ ],
2199
+ [
2200
+ 440,
2201
+ 3.06
2202
+ ],
2203
+ [
2204
+ 450,
2205
+ 3.2
2206
+ ],
2207
+ [
2208
+ 460,
2209
+ 3.03
2210
+ ],
2211
+ [
2212
+ 470,
2213
+ 3.05
2214
+ ],
2215
+ [
2216
+ 480,
2217
+ 2.97
2218
+ ],
2219
+ [
2220
+ 490,
2221
+ 3.08
2222
+ ],
2223
+ [
2224
+ 500,
2225
+ 3.6
2226
+ ]
2227
+ ],
2228
+ "EmoCLAN": [
2229
+ [
2230
+ 10,
2231
+ 2646.54
2232
+ ],
2233
+ [
2234
+ 20,
2235
+ 237.9
2236
+ ],
2237
+ [
2238
+ 30,
2239
+ 317.26
2240
+ ],
2241
+ [
2242
+ 40,
2243
+ 145.22
2244
+ ],
2245
+ [
2246
+ 50,
2247
+ 148.97
2248
+ ],
2249
+ [
2250
+ 60,
2251
+ 225.55
2252
+ ],
2253
+ [
2254
+ 70,
2255
+ 104.09
2256
+ ],
2257
+ [
2258
+ 80,
2259
+ 92.09
2260
+ ],
2261
+ [
2262
+ 90,
2263
+ 107.5
2264
+ ],
2265
+ [
2266
+ 100,
2267
+ 130.0
2268
+ ],
2269
+ [
2270
+ 110,
2271
+ 97.33
2272
+ ],
2273
+ [
2274
+ 120,
2275
+ 87.69
2276
+ ],
2277
+ [
2278
+ 130,
2279
+ 86.27
2280
+ ],
2281
+ [
2282
+ 140,
2283
+ 77.78
2284
+ ],
2285
+ [
2286
+ 150,
2287
+ 66.3
2288
+ ],
2289
+ [
2290
+ 160,
2291
+ 84.44
2292
+ ],
2293
+ [
2294
+ 170,
2295
+ 70.21
2296
+ ],
2297
+ [
2298
+ 180,
2299
+ 71.12
2300
+ ],
2301
+ [
2302
+ 190,
2303
+ 60.57
2304
+ ],
2305
+ [
2306
+ 200,
2307
+ 58.8
2308
+ ],
2309
+ [
2310
+ 210,
2311
+ 56.19
2312
+ ],
2313
+ [
2314
+ 220,
2315
+ 64.68
2316
+ ],
2317
+ [
2318
+ 230,
2319
+ 58.71
2320
+ ],
2321
+ [
2322
+ 240,
2323
+ 72.35
2324
+ ],
2325
+ [
2326
+ 250,
2327
+ 62.81
2328
+ ],
2329
+ [
2330
+ 260,
2331
+ 62.0
2332
+ ],
2333
+ [
2334
+ 270,
2335
+ 62.57
2336
+ ],
2337
+ [
2338
+ 280,
2339
+ 55.06
2340
+ ],
2341
+ [
2342
+ 290,
2343
+ 52.29
2344
+ ],
2345
+ [
2346
+ 300,
2347
+ 55.84
2348
+ ],
2349
+ [
2350
+ 310,
2351
+ 55.93
2352
+ ],
2353
+ [
2354
+ 320,
2355
+ 61.57
2356
+ ],
2357
+ [
2358
+ 330,
2359
+ 66.8
2360
+ ],
2361
+ [
2362
+ 340,
2363
+ 64.74
2364
+ ],
2365
+ [
2366
+ 350,
2367
+ 67.67
2368
+ ],
2369
+ [
2370
+ 360,
2371
+ 64.73
2372
+ ],
2373
+ [
2374
+ 370,
2375
+ 60.54
2376
+ ],
2377
+ [
2378
+ 380,
2379
+ 57.82
2380
+ ],
2381
+ [
2382
+ 390,
2383
+ 52.32
2384
+ ],
2385
+ [
2386
+ 400,
2387
+ 52.11
2388
+ ],
2389
+ [
2390
+ 410,
2391
+ 51.81
2392
+ ],
2393
+ [
2394
+ 420,
2395
+ 50.83
2396
+ ],
2397
+ [
2398
+ 430,
2399
+ 49.49
2400
+ ],
2401
+ [
2402
+ 440,
2403
+ 41.85
2404
+ ],
2405
+ [
2406
+ 450,
2407
+ 39.5
2408
+ ],
2409
+ [
2410
+ 460,
2411
+ 37.8
2412
+ ],
2413
+ [
2414
+ 470,
2415
+ 42.96
2416
+ ],
2417
+ [
2418
+ 480,
2419
+ 41.26
2420
+ ],
2421
+ [
2422
+ 490,
2423
+ 38.94
2424
+ ],
2425
+ [
2426
+ 500,
2427
+ 45.65
2428
+ ]
2429
+ ]
2430
+ }
2431
+ }
AMP-compatible/logs/trec_weights_log.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1039ad77b7d2814784414fd1e9b769a61286f264a93e82ba7dd5e9bffd847b1c
3
+ size 11052986