muooon committed (verified)
Commit 937d8ca · Parent(s): 8aec887

Upload 5 files
optimizer/Use_Kohya-sd-script.txt ADDED
@@ -0,0 +1,46 @@

Usage with Kohya-sd-script

To use these Emo series optimizers with Kohya-sd-script, simply place this
folder as-is into the "sd-scripts" folder of your Kohya-sd-script
installation:

    sd-scripts/optimizer

With this setup, you can use each optimizer simply by specifying exactly
one of the following:

    --optimizer_type=optimizer.emonavi.EmoNavi
    --optimizer_type=optimizer.emofact.EmoFact
    --optimizer_type=optimizer.emolynx.EmoLynx

---
Thanks to the flexible configuration of Kohya-sd-script, you can try these
out right away. We extend our deepest gratitude to the developers and
contributors of Kohya-sd-script:
Kohya-sd-script: https://github.com/kohya-ss/sd-scripts

Fact was inspired by Adafactor.
Lynx was inspired by Lion and Tiger.
The Emo series was completed by learning from the achievements of the many
optimizers developed to date. We are grateful to all of their developers.
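
For example, a typical sd-scripts training run then needs only the extra
--optimizer_type flag. A minimal sketch, assuming a LoRA run via
train_network.py; the model path and the other flags are illustrative
placeholders, not part of this package:

    accelerate launch train_network.py \
      --pretrained_model_name_or_path=/path/to/model.safetensors \
      --network_module=networks.lora \
      --learning_rate=1e-3 \
      --optimizer_type=optimizer.emonavi.EmoNavi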
optimizer/__init__.py ADDED
File without changes
optimizer/emofact.py ADDED
@@ -0,0 +1,117 @@

import torch
from torch.optim import Optimizer
import math

class EmoFact(Optimizer):
    # Class definition & initialization
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999),
                 eps=1e-8, weight_decay=0.01):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super().__init__(params, defaults)
        self.should_stop = False  # initialize the stop flag read by external code

    # Emotional EMA update (tension and calm)
    def _update_ema(self, state, loss_val):
        ema = state.setdefault('ema', {})
        ema['short'] = 0.3 * loss_val + 0.7 * ema.get('short', loss_val)
        ema['long'] = 0.01 * loss_val + 0.99 * ema.get('long', loss_val)
        return ema

    # Emotional scalar (EMA difference through a smooth nonlinearity;
    # tanh(5 * diff) sharpens sensitivity)
    def _compute_scalar(self, ema):
        diff = ema['short'] - ema['long']
        return math.tanh(5 * diff)

    # Shadow blend ratio (> 0.6: 70-90%, < -0.6: 10%, |scalar| > 0.3: 30%, otherwise: 0%)
    def _decide_ratio(self, scalar):
        if scalar > 0.6:
            return 0.7 + 0.2 * scalar
        elif scalar < -0.6:
            return 0.1
        elif abs(scalar) > 0.3:
            return 0.3
        return 0.0

    # Loss capture (loss_val is the numeric loss used for the emotion signal;
    # parameters without gradients need no update and are skipped)
    @torch.no_grad()
    def step(self, closure=None):
        # Note: unlike EmoLynx, the closure here runs under no_grad();
        # call backward() beforehand and let the closure just return the loss.
        loss = closure() if closure is not None else None
        loss_val = loss.item() if loss is not None else 0.0

        scalar = 0.0  # keeps the history update below safe even if nothing has a grad
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                grad = p.grad.data
                state = self.state[p]

                # Emotional EMA update and scalar generation (existing logic preserved)
                ema = self._update_ema(state, loss_val)
                scalar = self._compute_scalar(ema)
                ratio = self._decide_ratio(scalar)

                # shadow param: updated only when needed (existing logic preserved)
                if ratio > 0:
                    if 'shadow' not in state:
                        state['shadow'] = p.data.clone()
                    else:
                        p.data.mul_(1 - ratio).add_(state['shadow'], alpha=ratio)
                    state['shadow'].lerp_(p.data, 0.05)

                # --- New gradient-correction logic ---
                # For tensors of rank 2 or higher, use a variance-based AB approximation
                if grad.dim() >= 2:
                    # Row and column mean squares (a lightweight proxy for the variance)
                    r_sq = torch.mean(grad * grad, dim=tuple(range(1, grad.dim())), keepdim=True).add_(group['eps'])
                    c_sq = torch.mean(grad * grad, dim=0, keepdim=True).add_(group['eps'])

                    # Build an approximate factorization of the gradient from the
                    # variance information: taking A = sqrt(r_sq) and B = sqrt(c_sq)
                    # reproduces an AB-matrix approximation, smoothed with an EMA.
                    beta1, beta2 = group['betas']

                    state.setdefault('exp_avg_r', torch.zeros_like(r_sq)).mul_(beta1).add_(torch.sqrt(r_sq), alpha=1 - beta1)
                    state.setdefault('exp_avg_c', torch.zeros_like(c_sq)).mul_(beta1).add_(torch.sqrt(c_sq), alpha=1 - beta1)

                    # Normalize by the square root of the product of the reconstructed
                    # factors; this plays the role of a second moment.
                    denom = torch.sqrt(state['exp_avg_r'] * state['exp_avg_c']).add_(group['eps'])

                    # Final update term
                    update_term = grad / denom

                # 1-D (vector) gradient correction (close to a decoupled
                # weight decay structure)
                else:
                    exp_avg = state.setdefault('exp_avg', torch.zeros_like(p.data))
                    exp_avg_sq = state.setdefault('exp_avg_sq', torch.zeros_like(p.data))
                    beta1, beta2 = group['betas']
                    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    update_term = exp_avg / denom

                # Final parameter update (decoupled weight decay applied as well)
                p.data.add_(p.data, alpha=-group['weight_decay'] * group['lr'])
                p.data.add_(update_term, alpha=-group['lr'])

        # --- Early-stop logic (existing logic preserved) ---
        # Scalar history lives in a shared buffer, capped at 32 entries
        hist = self.state.setdefault('scalar_hist', [])
        hist.append(scalar)
        if len(hist) > 32:
            hist.pop(0)

        # Early-stop decision
        if len(hist) >= 32:
            buf = hist
            avg_abs = sum(abs(s) for s in buf) / len(buf)
            std = sum((s - sum(buf) / len(buf)) ** 2 for s in buf) / len(buf)  # variance, used as a quietness measure
            if avg_abs < 0.05 and std < 0.005:
                self.should_stop = True

        return loss

"""
Fact is inspired by Adafactor,
and its VRAM-friendly design is something everyone loves.
"""
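
A minimal usage sketch (not part of the uploaded files; the toy model and
data are made-up placeholders). Because step() runs under torch.no_grad(),
backward() is called outside and the closure only hands the finished loss
to the optimizer's emotion signal:

import torch
import torch.nn as nn
from optimizer.emofact import EmoFact

model = nn.Linear(16, 4)
opt = EmoFact(model.parameters(), lr=1e-3)
x, y = torch.randn(8, 16), torch.randn(8, 4)

for _ in range(200):
    opt.zero_grad()
    loss = nn.functional.mse_loss(model(x), y)
    loss.backward()
    opt.step(lambda: loss)  # the closure just reports the loss value
    if opt.should_stop:     # quietness flag; actually stopping is up to the caller
        break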
optimizer/emolynx.py ADDED
@@ -0,0 +1,129 @@

import torch
from torch.optim import Optimizer
import math
from typing import Callable, Optional, Union

# Helper function (Lynx)
def exists(val):
    return val is not None

class EmoLynx(Optimizer):
    # Class definition & initialization
    def __init__(self, params: Union[list, torch.nn.Module], lr=1e-3, betas=(0.9, 0.99),
                 # Lynx-style betas and a compatibility option (beta1/beta2 for the Lynx update)
                 eps=1e-8, weight_decay=0.01, decoupled_weight_decay: bool = False):

        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super().__init__(params, defaults)

        # Stored so the weight decay can be rescaled, following Lynx
        self._init_lr = lr
        self.decoupled_wd = decoupled_weight_decay
        self.should_stop = False  # initialize the stop flag

    # Emotional EMA update (tension and calm)
    def _update_ema(self, state, loss_val):
        ema = state.setdefault('ema', {})
        ema['short'] = 0.3 * loss_val + 0.7 * ema.get('short', loss_val)
        ema['long'] = 0.01 * loss_val + 0.99 * ema.get('long', loss_val)
        return ema

    # Emotional scalar (EMA difference through a smooth nonlinearity;
    # tanh(5 * diff) sharpens sensitivity)
    def _compute_scalar(self, ema):
        diff = ema['short'] - ema['long']
        return math.tanh(5 * diff)

    # Shadow blend ratio (> 0.6: 70-90%, < -0.6: 10%, |scalar| > 0.3: 30%, otherwise: 0%)
    def _decide_ratio(self, scalar):
        if scalar > 0.6:
            return 0.7 + 0.2 * scalar
        elif scalar < -0.6:
            return 0.1
        elif abs(scalar) > 0.3:
            return 0.3
        return 0.0

    # Loss capture (loss_val is the numeric loss used for the emotion signal;
    # parameters without gradients need no update and are skipped)
    @torch.no_grad()
    def step(self, closure: Optional[Callable] = None):  # type hint for the closure
        loss = None
        if exists(closure):  # use the exists helper for consistency
            with torch.enable_grad():
                loss = closure()
        loss_val = loss.item() if loss is not None else 0.0

        scalar = 0.0  # keeps the history update below safe even if nothing has a grad
        for group in self.param_groups:
            # Extract the shared Lynx parameters
            lr, wd, beta1, beta2 = group['lr'], group['weight_decay'], *group['betas']

            # Separate handling of weight decay (from Lynx)
            _wd_actual = wd
            if self.decoupled_wd:
                _wd_actual /= self._init_lr  # rescale the decay when decoupled

            for p in filter(lambda p: exists(p.grad), group['params']):  # filter on grads

                grad = p.grad  # use the gradient directly (no ".data" needed here)
                state = self.state[p]

                # EMA update and scalar generation (the EMA difference yields the
                # scalar, which decides the spike ratio)
                ema = self._update_ema(state, loss_val)
                scalar = self._compute_scalar(ema)
                ratio = self._decide_ratio(scalar)

                # shadow param: updated only when needed (a dynamic history that
                # tracks the current weights by 5% on each spike)
                if ratio > 0:
                    if 'shadow' not in state:
                        state['shadow'] = p.data.clone()
                    else:
                        p.data.mul_(1 - ratio).add_(state['shadow'], alpha=ratio)
                    state['shadow'].lerp_(p.data, 0.05)
                    # The shadow is refreshed from p.data before the Lynx update
                    # (tracking the current value by 5%):
                    # p.data.mul_(1 - ratio).add_(state['shadow'], alpha=ratio)
                    # EmoNavi: p.data = p.data * (1 - ratio) + shadow * ratio

                # --- Start Lynx gradient update logic ---

                # Lynx state initialization (exp_avg)
                if 'exp_avg' not in state:
                    state['exp_avg'] = torch.zeros_like(p)
                exp_avg = state['exp_avg']

                # Stepweight decay (from Lynx): p.data = p.data * (1 - lr * wd);
                # uses _wd_actual to honor decoupled_wd (EmoNavi applies wd last instead)
                p.data.mul_(1. - lr * _wd_actual)

                # Gradient blend: m_t = beta1 * exp_avg_prev + (1 - beta1) * grad
                blended_grad = grad.mul(1. - beta1).add_(exp_avg, alpha=beta1)

                # Sign update: p.data = p.data - lr * sign(blended_grad)
                p.data.add_(blended_grad.sign_(), alpha=-lr)

                # exp_avg = beta2 * exp_avg + (1 - beta2) * grad
                exp_avg.mul_(beta2).add_(grad, alpha=1. - beta2)

                # --- End Lynx gradient update logic ---

        # Scalar history for early stop (shared buffer / at most 32 entries /
        # gauges how quiet training has become). Note: this accesses self.state,
        # not per-parameter state, and sits outside the inner loop.
        hist = self.state.setdefault('scalar_hist', [])
        hist.append(scalar)
        if len(hist) > 32:
            hist.pop(0)

        # Early-stop decision (a signal of quietness)
        if len(hist) >= 32:
            buf = hist
            avg_abs = sum(abs(s) for s in buf) / len(buf)
            std = sum((s - sum(buf) / len(buf)) ** 2 for s in buf) / len(buf)  # variance, used as a quietness measure
            if avg_abs < 0.05 and std < 0.005:
                self.should_stop = True  # 💡 external code can check this flag

        return loss

"""
Lynx was developed with inspiration from Lion and Tiger,
which we deeply respect for their lightweight and intelligent design.
Lynx also integrates EmoNavi to enhance its capabilities.
"""
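
Because EmoLynx evaluates the closure under torch.enable_grad(), the
conventional full-closure pattern works here. A minimal sketch (not part of
the uploaded files; the toy model and data are made-up placeholders):

import torch
import torch.nn as nn
from optimizer.emolynx import EmoLynx

model = nn.Linear(16, 4)
opt = EmoLynx(model.parameters(), lr=1e-4, decoupled_weight_decay=True)
x, y = torch.randn(8, 16), torch.randn(8, 4)

def closure():
    opt.zero_grad()
    loss = nn.functional.mse_loss(model(x), y)
    loss.backward()
    return loss

for _ in range(200):
    loss = opt.step(closure)  # forward/backward run inside step(), under enable_grad
    if opt.should_stop:       # quietness flag raised after 32 calm steps
        break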
optimizer/emonavi.py ADDED
@@ -0,0 +1,96 @@

import torch
from torch.optim import Optimizer
import math

class EmoNavi(Optimizer):
    # Class definition & initialization
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999),
                 eps=1e-8, weight_decay=0.01):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super().__init__(params, defaults)
        self.should_stop = False  # initialize the stop flag

    # Emotional EMA update (tension and calm)
    def _update_ema(self, state, loss_val):
        ema = state.setdefault('ema', {})
        ema['short'] = 0.3 * loss_val + 0.7 * ema.get('short', loss_val)
        ema['long'] = 0.01 * loss_val + 0.99 * ema.get('long', loss_val)
        return ema

    # Emotional scalar (EMA difference through a smooth nonlinearity;
    # tanh(5 * diff) sharpens sensitivity)
    def _compute_scalar(self, ema):
        diff = ema['short'] - ema['long']
        return math.tanh(5 * diff)

    # Shadow blend ratio (> 0.6: 70-90%, < -0.6: 10%, |scalar| > 0.3: 30%, otherwise: 0%)
    def _decide_ratio(self, scalar):
        if scalar > 0.6:
            return 0.7 + 0.2 * scalar
        elif scalar < -0.6:
            return 0.1
        elif abs(scalar) > 0.3:
            return 0.3
        return 0.0

    # Loss capture (loss_val is the numeric loss used for the emotion signal;
    # parameters without gradients need no update and are skipped)
    @torch.no_grad()
    def step(self, closure=None):
        loss = closure() if closure is not None else None
        loss_val = loss.item() if loss is not None else 0.0

        scalar = 0.0  # keeps the history update below safe even if nothing has a grad
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                grad = p.grad.data
                state = self.state[p]

                # EMA update and scalar generation (the EMA difference yields the
                # scalar, which decides the spike ratio)
                ema = self._update_ema(state, loss_val)
                scalar = self._compute_scalar(ema)
                ratio = self._decide_ratio(scalar)

                # shadow param: updated only when needed (a dynamic history that
                # tracks the current weights by 5% on each spike)
                if ratio > 0:
                    if 'shadow' not in state:
                        state['shadow'] = p.data.clone()
                    else:
                        p.data.mul_(1 - ratio).add_(state['shadow'], alpha=ratio)
                    state['shadow'].lerp_(p.data, 0.05)

                # Scalar generation: the short/long EMA difference yields the signal
                #   (the strength of the arousal).
                # Blend ratio: computed only when the scalar exceeds a threshold
                #   (a screen for whether the emotion signal can be trusted).
                # -> For small scalar values, ratio = 0 and no shadow blending occurs.
                # -> The emotion mechanism fires only on strong, reliable differences
                #    (an implicit confidence test).

                # Gradient correction with first and second moments
                # (close to a decoupled weight decay structure)
                exp_avg = state.setdefault('exp_avg', torch.zeros_like(p.data))
                exp_avg_sq = state.setdefault('exp_avg_sq', torch.zeros_like(p.data))
                beta1, beta2 = group['betas']
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                denom = exp_avg_sq.sqrt().add_(group['eps'])

                step_size = group['lr']
                if group['weight_decay']:
                    p.data.add_(p.data, alpha=-group['weight_decay'] * step_size)
                p.data.addcdiv_(exp_avg, denom, value=-step_size)

        # Tells the outside world that the emotion mechanism has settled and training
        # is "sufficiently stable" (this is not an automatic stopping mechanism).
        # Scalar history for early stop (shared buffer / at most 32 entries).
        hist = self.state.setdefault('scalar_hist', [])
        hist.append(scalar)
        if len(hist) > 32:
            hist.pop(0)

        # Early-stop decision (a signal of quietness)
        if len(hist) >= 32:
            buf = hist
            avg_abs = sum(abs(s) for s in buf) / len(buf)
            std = sum((s - sum(buf) / len(buf)) ** 2 for s in buf) / len(buf)  # variance, used as a quietness measure
            if avg_abs < 0.05 and std < 0.005:
                self.should_stop = True  # 💡 external code can check this flag

        # When 32 steps' worth of scalar values satisfy the quietness condition,
        # the only effect is that the flag should_stop becomes True.

        return loss

# https://github.com/muooon/EmoNavi
# An emotion-driven optimizer that feels loss and navigates accordingly.
# Don't think. Feel. Don't stop. Keep running. Believe in what's beyond.
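
The emotion mechanism shared by all three optimizers is easy to probe in
isolation. A minimal standalone sketch (the loss trajectory is illustrative)
of how a loss spike drives the scalar and the shadow blend ratio:

import math

short, long_ = 1.0, 1.0  # both EMAs start at the first observed loss
for loss_val in [1.0, 1.0, 2.5, 2.5, 1.0]:  # a brief loss spike
    short = 0.3 * loss_val + 0.7 * short    # fast EMA: reacts to the spike
    long_ = 0.01 * loss_val + 0.99 * long_  # slow EMA: barely moves
    scalar = math.tanh(5 * (short - long_))
    # Same thresholds as _decide_ratio: a strong spike pulls the weights
    # toward the shadow copy.
    if scalar > 0.6:
        ratio = 0.7 + 0.2 * scalar
    elif scalar < -0.6:
        ratio = 0.1
    elif abs(scalar) > 0.3:
        ratio = 0.3
    else:
        ratio = 0.0
    print(f"loss={loss_val:.2f}  scalar={scalar:+.3f}  ratio={ratio:.2f}")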