KublaiKhan1 committed on
Commit
aa91712
·
verified ·
1 Parent(s): 5238eee

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. dt0_1/targets_dt01.py +153 -0
dt0_1/targets_dt01.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import jax
import jax.numpy as jnp
import numpy as np


def get_targets(FLAGS, key, train_state, images, labels, force_t=-1, force_dt=-1):
    """Build mixed bootstrap + flow-matching training targets for a shortcut-style model.

    The first ``batch_size // bootstrap_every`` elements get *bootstrap* targets:
    the current (or EMA) model is run for two half-steps of size ``dt/2`` and the
    average velocity becomes the target. The remaining elements get plain
    flow-matching targets ``v = x_1 - (1 - 1e-5) * x_0``.

    Convention (from the original comments): t=0 is all noise, t=1 is the image.

    Args:
        FLAGS: config object; reads ``FLAGS.batch_size`` and the ``FLAGS.model``
            dict keys 'bootstrap_every', 'denoise_timesteps', 'bootstrap_dt_bias',
            'bootstrap_ema', 'bootstrap_cfg', 'cfg_scale', 'num_classes',
            'class_dropout_prob'.
        key: JAX PRNG key; split into label / time / noise subkeys.
        train_state: exposes ``call_model(x_t, t, dt_base, labels, train=...)``
            and ``call_model_ema`` with the same signature.
        images: data batch, indexed as (batch, H, W, C) by the ``[:, None, None, None]``
            broadcasts below — assumed NHWC float images; TODO confirm with caller.
        labels: integer class labels, shape (batch,).
        force_t: if != -1, overrides every sampled t with this value.
        force_dt: if != -1, overrides every sampled dt_base with this value.

    Returns:
        ``(x_t, v_t, t, dt_base, labels_dropped, info)`` — noisy inputs, velocity
        targets, times in [0, 1), normalized log2 step sizes in [0, 1], labels with
        classifier-free dropout applied, and a dict of scalar diagnostics.

    NOTE(review): the constant 7.0 used throughout equals log2(128) and hard-codes
    ``denoise_timesteps == 128`` — kept as-is to preserve behavior; confirm before
    changing ``denoise_timesteps``. Debug ``print`` calls are preserved verbatim.
    """
    label_key, time_key, noise_key = jax.random.split(key, 3)
    info = {}

    # 1) =========== Sample dt. ============
    bootstrap_batchsize = FLAGS.batch_size // FLAGS.model['bootstrap_every']
    log2_sections = np.log2(FLAGS.model['denoise_timesteps']).astype(np.int32)
    if FLAGS.model['bootstrap_dt_bias'] == 0:
        # Uniform over dt levels: [log2-1, ..., 1, 0], each repeated to fill the batch,
        # padded with zeros (the smallest dt level) to exactly bootstrap_batchsize.
        dt_base = jnp.repeat(log2_sections - 1 - jnp.arange(log2_sections), bootstrap_batchsize // log2_sections)
        dt_base = jnp.concatenate([dt_base, jnp.zeros(bootstrap_batchsize - dt_base.shape[0],)])
        num_dt_cfg = bootstrap_batchsize // log2_sections
    else:
        # Biased split: half the batch over the larger dt levels, then a quarter
        # each at levels 1 and 0, padded with zeros.
        dt_base = jnp.repeat(log2_sections - 1 - jnp.arange(log2_sections - 2), (bootstrap_batchsize // 2) // log2_sections)
        dt_base = jnp.concatenate([dt_base, jnp.ones(bootstrap_batchsize // 4), jnp.zeros(bootstrap_batchsize // 4)])
        dt_base = jnp.concatenate([dt_base, jnp.zeros(bootstrap_batchsize - dt_base.shape[0],)])
        num_dt_cfg = (bootstrap_batchsize // 2) // log2_sections
    force_dt_vec = jnp.ones(bootstrap_batchsize, dtype=jnp.float32) * force_dt
    dt_base = jnp.where(force_dt_vec != -1, force_dt_vec, dt_base)

    # Rescale dt_base from integer log2 levels to [0, 1]; 7.0 == log2(128) hard-coded.
    dt_base = dt_base / 7.0
    dt = 1 / (2 ** (dt_base * 7.0))  # [1, 1/2, 1/4, 1/8, 1/16, 1/32, ...]
    # The bootstrap teacher runs one log2 level finer: half the step size.
    dt_base_bootstrap = dt_base + 1 / 7.0
    dt_bootstrap = dt / 2
    print('dt base', dt_base)  # levels log2-1 .. 0, normalized, over the bootstrap slice
    print('dt', dt)
    print("dt base boot", dt_base_bootstrap)
    print("dt bootstrap", dt_bootstrap)

    # 2) =========== Sample t. ============
    # Number of valid t grid points at each element's dt level (2^level).
    dt_sections = jnp.power(2, dt_base * 7.0)
    print("dt sections", dt_sections)
    # t is a random grid point in [0, 1) aligned to that element's dt section.
    t = jax.random.randint(time_key, (bootstrap_batchsize,), minval=0, maxval=dt_sections).astype(jnp.float32)
    t = t / dt_sections
    force_t_vec = jnp.ones(bootstrap_batchsize, dtype=jnp.float32) * force_t
    t = jnp.where(force_t_vec != -1, force_t_vec, t)
    t_full = t[:, None, None, None]
    print("t", t)

    # 3) =========== Generate Bootstrap Targets ============
    x_1 = images[:bootstrap_batchsize]
    x_0 = jax.random.normal(noise_key, x_1.shape)
    x_t = (1 - (1 - 1e-5) * t_full) * x_0 + t_full * x_1
    bst_labels = labels[:bootstrap_batchsize]
    call_model_fn = train_state.call_model if FLAGS.model['bootstrap_ema'] == 0 else train_state.call_model_ema
    if not FLAGS.model['bootstrap_cfg']:
        # Two half-steps of the teacher; average velocity is the target.
        v_b1 = call_model_fn(x_t, t, dt_base_bootstrap, bst_labels, train=False)
        t2 = t + dt_bootstrap
        x_t2 = x_t + dt_bootstrap[:, None, None, None] * v_b1
        x_t2 = jnp.clip(x_t2, -4, 4)
        v_b2 = call_model_fn(x_t2, t2, dt_base_bootstrap, bst_labels, train=False)
        v_target = (v_b1 + v_b2) / 2
    else:
        # Classifier-free guidance: append num_dt_cfg unconditional copies
        # (label == num_classes) and mix conditional/unconditional velocities.
        x_t_extra = jnp.concatenate([x_t, x_t[:num_dt_cfg]], axis=0)
        t_extra = jnp.concatenate([t, t[:num_dt_cfg]], axis=0)
        dt_base_extra = jnp.concatenate([dt_base_bootstrap, dt_base_bootstrap[:num_dt_cfg]], axis=0)
        labels_extra = jnp.concatenate([bst_labels, jnp.ones(num_dt_cfg, dtype=jnp.int32) * FLAGS.model['num_classes']], axis=0)
        print("t extra", t_extra)
        print("dt_base_extra", dt_base_extra)
        v_b1_raw = call_model_fn(x_t_extra, t_extra, dt_base_extra, labels_extra, train=False)
        v_b_cond = v_b1_raw[:x_1.shape[0]]
        v_b_uncond = v_b1_raw[x_1.shape[0]:]
        v_cfg = v_b_uncond + FLAGS.model['cfg_scale'] * (v_b_cond[:num_dt_cfg] - v_b_uncond)
        v_b1 = jnp.concatenate([v_cfg, v_b_cond[num_dt_cfg:]], axis=0)
        print("this is t", t)
        print("dt boot", dt_bootstrap)
        # t2 cannot exceed 1: t = k / 2^level with k < 2^level, and dt_bootstrap = 1 / 2^(level+1).
        t2 = t + dt_bootstrap
        print("resulting t2", t2)
        x_t2 = x_t + dt_bootstrap[:, None, None, None] * v_b1
        x_t2 = jnp.clip(x_t2, -4, 4)
        x_t2_extra = jnp.concatenate([x_t2, x_t2[:num_dt_cfg]], axis=0)
        t2_extra = jnp.concatenate([t2, t2[:num_dt_cfg]], axis=0)
        print("t2 extra", t2_extra)
        print("dt_base_extra2", dt_base_extra)
        v_b2_raw = call_model_fn(x_t2_extra, t2_extra, dt_base_extra, labels_extra, train=False)
        v_b2_cond = v_b2_raw[:x_1.shape[0]]
        v_b2_uncond = v_b2_raw[x_1.shape[0]:]
        v_b2_cfg = v_b2_uncond + FLAGS.model['cfg_scale'] * (v_b2_cond[:num_dt_cfg] - v_b2_uncond)
        v_b2 = jnp.concatenate([v_b2_cfg, v_b2_cond[num_dt_cfg:]], axis=0)
        v_target = (v_b1 + v_b2) / 2

    v_target = jnp.clip(v_target, -4, 4)
    bst_v = v_target
    bst_dt = dt_base
    bst_t = t
    bst_xt = x_t
    bst_l = bst_labels

    # 4) =========== Generate Flow-Matching Targets ============
    # Classifier-free dropout: replace a random fraction of labels with the
    # "unconditional" class index (num_classes).
    labels_dropout = jax.random.bernoulli(label_key, FLAGS.model['class_dropout_prob'], (labels.shape[0],))
    labels_dropped = jnp.where(labels_dropout, FLAGS.model['num_classes'], labels)
    info['dropped_ratio'] = jnp.mean(labels_dropped == FLAGS.model['num_classes'])

    # Sample t uniformly on the full denoise grid.
    t = jax.random.randint(time_key, (images.shape[0],), minval=0, maxval=FLAGS.model['denoise_timesteps']).astype(jnp.float32)
    t /= FLAGS.model['denoise_timesteps']
    force_t_vec = jnp.ones(images.shape[0], dtype=jnp.float32) * force_t
    t = jnp.where(force_t_vec != -1, force_t_vec, t)  # If force_t is not -1, then use force_t.
    t_full = t[:, None, None, None]  # [batch, 1, 1, 1]

    # Sample flow pairs x_t, v_t. (Fixed stutter: was `x_t = x_t = ...` / `v_t = v_t = ...`.)
    x_0 = jax.random.normal(noise_key, images.shape)
    x_1 = images
    x_t = (1 - (1 - 1e-5) * t_full) * x_0 + t_full * x_1
    v_t = x_1 - (1 - 1e-5) * x_0
    dt_flow = np.log2(FLAGS.model['denoise_timesteps']).astype(jnp.int32)
    # Flow-matching entries use the finest dt level, normalized like the bootstrap ones.
    dt_base = jnp.ones(images.shape[0], dtype=jnp.int32) * dt_flow / 7.0

    # ==== 5) Merge Flow+Bootstrap ====
    bst_size = FLAGS.batch_size // FLAGS.model['bootstrap_every']
    bst_size_data = FLAGS.batch_size - bst_size
    x_t = jnp.concatenate([bst_xt, x_t[:bst_size_data]], axis=0)
    t = jnp.concatenate([bst_t, t[:bst_size_data]], axis=0)
    dt_base = jnp.concatenate([bst_dt, dt_base[:bst_size_data]], axis=0)
    v_t = jnp.concatenate([bst_v, v_t[:bst_size_data]], axis=0)
    labels_dropped = jnp.concatenate([bst_l, labels_dropped[:bst_size_data]], axis=0)
    # BUG FIX: dt_base was rescaled by /7.0, so flow entries equal dt_flow/7.0, not
    # dt_flow. The old comparison `dt_base != dt_flow` was true for every element,
    # pinning bootstrap_ratio at 1.0 regardless of the actual mix.
    info['bootstrap_ratio'] = jnp.mean(dt_base != dt_flow / 7.0)

    info['v_magnitude_bootstrap'] = jnp.sqrt(jnp.mean(jnp.square(bst_v)))
    info['v_magnitude_b1'] = jnp.sqrt(jnp.mean(jnp.square(v_b1)))
    info['v_magnitude_b2'] = jnp.sqrt(jnp.mean(jnp.square(v_b2)))

    print("final t", t)
    print("final dt base", dt_base)
    return x_t, v_t, t, dt_base, labels_dropped, info