Namyoung Kim committed on
Commit
1e111c0
·
1 Parent(s): 82b2e47
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .DS_Store +0 -0
  2. .gitattributes +2 -0
  3. index.html +174 -280
  4. static/{videos/blueshirt.mp4 → images/figure_case.png} +2 -2
  5. static/images/interpolate_end.jpg +0 -0
  6. static/images/interpolate_start.jpg +0 -0
  7. static/{videos/chair-tp.mp4 → images/main_1.png} +2 -2
  8. static/{videos/coffee.mp4 → images/main_1_.png} +2 -2
  9. static/{videos/dollyzoom-depth.mp4 → images/main_2.png} +2 -2
  10. static/images/main_2_.png +3 -0
  11. static/images/main_results.png +3 -0
  12. static/images/principles.png +3 -0
  13. static/interpolation/stacked/000000.jpg +0 -0
  14. static/interpolation/stacked/000001.jpg +0 -0
  15. static/interpolation/stacked/000002.jpg +0 -0
  16. static/interpolation/stacked/000003.jpg +0 -0
  17. static/interpolation/stacked/000004.jpg +0 -0
  18. static/interpolation/stacked/000005.jpg +0 -0
  19. static/interpolation/stacked/000006.jpg +0 -0
  20. static/interpolation/stacked/000007.jpg +0 -0
  21. static/interpolation/stacked/000008.jpg +0 -0
  22. static/interpolation/stacked/000009.jpg +0 -0
  23. static/interpolation/stacked/000010.jpg +0 -0
  24. static/interpolation/stacked/000011.jpg +0 -0
  25. static/interpolation/stacked/000012.jpg +0 -0
  26. static/interpolation/stacked/000013.jpg +0 -0
  27. static/interpolation/stacked/000014.jpg +0 -0
  28. static/interpolation/stacked/000015.jpg +0 -0
  29. static/interpolation/stacked/000016.jpg +0 -0
  30. static/interpolation/stacked/000017.jpg +0 -0
  31. static/interpolation/stacked/000018.jpg +0 -0
  32. static/interpolation/stacked/000019.jpg +0 -0
  33. static/interpolation/stacked/000020.jpg +0 -0
  34. static/interpolation/stacked/000021.jpg +0 -0
  35. static/interpolation/stacked/000022.jpg +0 -0
  36. static/interpolation/stacked/000023.jpg +0 -0
  37. static/interpolation/stacked/000024.jpg +0 -0
  38. static/interpolation/stacked/000025.jpg +0 -0
  39. static/interpolation/stacked/000026.jpg +0 -0
  40. static/interpolation/stacked/000027.jpg +0 -0
  41. static/interpolation/stacked/000028.jpg +0 -0
  42. static/interpolation/stacked/000029.jpg +0 -0
  43. static/interpolation/stacked/000030.jpg +0 -0
  44. static/interpolation/stacked/000031.jpg +0 -0
  45. static/interpolation/stacked/000032.jpg +0 -0
  46. static/interpolation/stacked/000033.jpg +0 -0
  47. static/interpolation/stacked/000034.jpg +0 -0
  48. static/interpolation/stacked/000035.jpg +0 -0
  49. static/interpolation/stacked/000036.jpg +0 -0
  50. static/interpolation/stacked/000037.jpg +0 -0
.DS_Store ADDED
Binary file (6.15 kB). View file
 
.gitattributes CHANGED
@@ -46,3 +46,5 @@ static/videos/shiba.mp4 filter=lfs diff=lfs merge=lfs -text
46
  static/videos/steve.mp4 filter=lfs diff=lfs merge=lfs -text
47
  static/videos/teaser.mp4 filter=lfs diff=lfs merge=lfs -text
48
  static/videos/toby.mp4 filter=lfs diff=lfs merge=lfs -text
 
 
 
46
  static/videos/steve.mp4 filter=lfs diff=lfs merge=lfs -text
47
  static/videos/teaser.mp4 filter=lfs diff=lfs merge=lfs -text
48
  static/videos/toby.mp4 filter=lfs diff=lfs merge=lfs -text
49
+ static/images/*.png filter=lfs diff=lfs merge=lfs -text
50
+ *.png filter=lfs diff=lfs merge=lfs -text
index.html CHANGED
@@ -3,10 +3,10 @@
3
  <head>
4
  <meta charset="utf-8">
5
  <meta name="description"
6
- content="Deformable Neural Radiance Fields creates free-viewpoint portraits (nerfies) from casually captured videos.">
7
- <meta name="keywords" content="Nerfies, D-NeRF, NeRF">
8
  <meta name="viewport" content="width=device-width, initial-scale=1">
9
- <title>Nerfies: Deformable Neural Radiance Fields</title>
10
 
11
  <link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro"
12
  rel="stylesheet">
@@ -15,16 +15,55 @@
15
  <link rel="stylesheet" href="./static/css/bulma-carousel.min.css">
16
  <link rel="stylesheet" href="./static/css/bulma-slider.min.css">
17
  <link rel="stylesheet" href="./static/css/fontawesome.all.min.css">
 
18
  <link rel="stylesheet"
19
  href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css">
20
  <link rel="stylesheet" href="./static/css/index.css">
21
  <link rel="icon" href="./static/images/favicon.svg">
22
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
24
  <script defer src="./static/js/fontawesome.all.min.js"></script>
25
  <script src="./static/js/bulma-carousel.min.js"></script>
26
  <script src="./static/js/bulma-slider.min.js"></script>
27
  <script src="./static/js/index.js"></script>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  </head>
29
  <body>
30
 
@@ -33,38 +72,45 @@
33
  <div class="container is-max-desktop">
34
  <div class="columns is-centered">
35
  <div class="column has-text-centered">
36
- <h1 class="title is-1 publication-title">Nerfies: Deformable Neural Radiance Fields</h1>
 
 
37
  <div class="is-size-5 publication-authors">
38
  <span class="author-block">
39
- <a href="https://keunhong.com" target="_blank">Keunhong Park</a><sup>1</sup>,</span>
 
40
  <span class="author-block">
41
- <a href="https://utkarshsinha.com" target="_blank">Utkarsh Sinha</a><sup>2</sup>,</span>
 
42
  <span class="author-block">
43
- <a href="https://jonbarron.info" target="_blank">Jonathan T. Barron</a><sup>2</sup>,
44
  </span>
45
  <span class="author-block">
46
- <a href="http://sofienbouaziz.com" target="_blank">Sofien Bouaziz</a><sup>2</sup>,
47
  </span>
 
48
  <span class="author-block">
49
- <a href="https://www.danbgoldman.com" target="_blank">Dan B Goldman</a><sup>2</sup>,
50
  </span>
51
  <span class="author-block">
52
- <a href="https://homes.cs.washington.edu/~seitz/" target="_blank">Steven M. Seitz</a><sup>1,2</sup>,
53
  </span>
54
  <span class="author-block">
55
- <a href="http://www.ricardomartinbrualla.com" target="_blank">Ricardo Martin-Brualla</a><sup>2</sup>
 
 
 
56
  </span>
57
  </div>
58
 
59
  <div class="is-size-5 publication-authors">
60
- <span class="author-block"><sup>1</sup>University of Washington,</span>
61
- <span class="author-block"><sup>2</sup>Google Research</span>
62
  </div>
63
 
64
  <div class="column has-text-centered">
65
  <div class="publication-links">
66
  <!-- PDF Link. -->
67
- <span class="link-block">
68
  <a href="https://arxiv.org/pdf/2011.12948" target="_blank"
69
  class="external-link button is-normal is-rounded is-dark">
70
  <span class="icon">
@@ -72,9 +118,9 @@
72
  </span>
73
  <span>Paper</span>
74
  </a>
75
- </span>
76
  <span class="link-block">
77
- <a href="https://arxiv.org/abs/2011.12948" target="_blank"
78
  class="external-link button is-normal is-rounded is-dark">
79
  <span class="icon">
80
  <i class="ai ai-arxiv"></i>
@@ -82,7 +128,7 @@
82
  <span>arXiv</span>
83
  </a>
84
  </span>
85
- <!-- Video Link. -->
86
  <span class="link-block">
87
  <a href="https://www.youtube.com/watch?v=MrKrnHhk8IA" target="_blank"
88
  class="external-link button is-normal is-rounded is-dark">
@@ -91,10 +137,10 @@
91
  </span>
92
  <span>Video</span>
93
  </a>
94
- </span>
95
  <!-- Code Link. -->
96
  <span class="link-block">
97
- <a href="https://github.com/google/nerfies" target="_blank"
98
  class="external-link button is-normal is-rounded is-dark">
99
  <span class="icon">
100
  <i class="fab fa-github"></i>
@@ -104,7 +150,7 @@
104
  </span>
105
  <!-- Dataset Link. -->
106
  <span class="link-block">
107
- <a href="https://github.com/google/nerfies/releases/tag/0.1" target="_blank"
108
  class="external-link button is-normal is-rounded is-dark">
109
  <span class="icon">
110
  <i class="far fa-images"></i>
@@ -120,80 +166,6 @@
120
  </div>
121
  </section>
122
 
123
- <section class="hero teaser">
124
- <div class="container is-max-desktop">
125
- <div class="hero-body">
126
- <video id="teaser" autoplay muted loop playsinline height="100%">
127
- <source src="./static/videos/teaser.mp4"
128
- type="video/mp4">
129
- </video>
130
- <h2 class="subtitle has-text-centered">
131
- <span class="dnerf">Nerfies</span> turns selfie videos from your phone into
132
- free-viewpoint
133
- portraits.
134
- </h2>
135
- </div>
136
- </div>
137
- </section>
138
-
139
-
140
- <section class="hero is-light is-small">
141
- <div class="hero-body">
142
- <div class="container">
143
- <div id="results-carousel" class="carousel results-carousel">
144
- <div class="item item-steve">
145
- <video poster="" id="steve" autoplay controls muted loop playsinline height="100%">
146
- <source src="./static/videos/steve.mp4"
147
- type="video/mp4">
148
- </video>
149
- </div>
150
- <div class="item item-chair-tp">
151
- <video poster="" id="chair-tp" autoplay controls muted loop playsinline height="100%">
152
- <source src="./static/videos/chair-tp.mp4"
153
- type="video/mp4">
154
- </video>
155
- </div>
156
- <div class="item item-shiba">
157
- <video poster="" id="shiba" autoplay controls muted loop playsinline height="100%">
158
- <source src="./static/videos/shiba.mp4"
159
- type="video/mp4">
160
- </video>
161
- </div>
162
- <div class="item item-fullbody">
163
- <video poster="" id="fullbody" autoplay controls muted loop playsinline height="100%">
164
- <source src="./static/videos/fullbody.mp4"
165
- type="video/mp4">
166
- </video>
167
- </div>
168
- <div class="item item-blueshirt">
169
- <video poster="" id="blueshirt" autoplay controls muted loop playsinline height="100%">
170
- <source src="./static/videos/blueshirt.mp4"
171
- type="video/mp4">
172
- </video>
173
- </div>
174
- <div class="item item-mask">
175
- <video poster="" id="mask" autoplay controls muted loop playsinline height="100%">
176
- <source src="./static/videos/mask.mp4"
177
- type="video/mp4">
178
- </video>
179
- </div>
180
- <div class="item item-coffee">
181
- <video poster="" id="coffee" autoplay controls muted loop playsinline height="100%">
182
- <source src="./static/videos/coffee.mp4"
183
- type="video/mp4">
184
- </video>
185
- </div>
186
- <div class="item item-toby">
187
- <video poster="" id="toby" autoplay controls muted loop playsinline height="100%">
188
- <source src="./static/videos/toby2.mp4"
189
- type="video/mp4">
190
- </video>
191
- </div>
192
- </div>
193
- </div>
194
- </div>
195
- </section>
196
-
197
 
198
  <section class="section">
199
  <div class="container is-max-desktop">
@@ -203,233 +175,155 @@
203
  <h2 class="title is-3">Abstract</h2>
204
  <div class="content has-text-justified">
205
  <p>
206
- We present the first method capable of photorealistically reconstructing a non-rigidly
207
- deforming scene using photos/videos captured casually from mobile phones.
208
- </p>
209
- <p>
210
- Our approach augments neural radiance fields
211
- (NeRF) by optimizing an
212
- additional continuous volumetric deformation field that warps each observed point into a
213
- canonical 5D NeRF.
214
- We observe that these NeRF-like deformation fields are prone to local minima, and
215
- propose a coarse-to-fine optimization method for coordinate-based models that allows for
216
- more robust optimization.
217
- By adapting principles from geometry processing and physical simulation to NeRF-like
218
- models, we propose an elastic regularization of the deformation field that further
219
- improves robustness.
220
- </p>
221
- <p>
222
- We show that <span class="dnerf">Nerfies</span> can turn casually captured selfie
223
- photos/videos into deformable NeRF
224
- models that allow for photorealistic renderings of the subject from arbitrary
225
- viewpoints, which we dub <i>"nerfies"</i>. We evaluate our method by collecting data
226
- using a
227
- rig with two mobile phones that take time-synchronized photos, yielding train/validation
228
- images of the same pose at different viewpoints. We show that our method faithfully
229
- reconstructs non-rigidly deforming scenes and reproduces unseen views with high
230
- fidelity.
231
  </p>
232
  </div>
233
  </div>
234
  </div>
235
  <!--/ Abstract. -->
236
-
237
- <!-- Paper video. -->
238
- <div class="columns is-centered has-text-centered">
239
- <div class="column is-four-fifths">
240
- <h2 class="title is-3">Video</h2>
241
- <div class="publication-video">
242
- <iframe src="https://www.youtube.com/embed/MrKrnHhk8IA?rel=0&amp;showinfo=0"
243
- frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>
244
- </div>
245
- </div>
246
- </div>
247
- <!--/ Paper video. -->
248
  </div>
249
  </section>
250
 
251
 
 
252
  <section class="section">
253
  <div class="container is-max-desktop">
254
-
255
  <div class="columns is-centered">
256
-
257
- <!-- Visual Effects. -->
258
  <div class="column">
259
- <div class="content">
260
- <h2 class="title is-3">Visual Effects</h2>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
261
  <p>
262
- Using <i>nerfies</i> you can create fun visual effects. This Dolly zoom effect
263
- would be impossible without nerfies since it would require going through a wall.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
264
  </p>
265
- <video id="dollyzoom" autoplay controls muted loop playsinline height="100%">
266
- <source src="./static/videos/dollyzoom-stacked.mp4"
267
- type="video/mp4">
268
- </video>
269
  </div>
270
- </div>
271
- <!--/ Visual Effects. -->
272
 
273
- <!-- Matting. -->
274
- <div class="column">
275
- <h2 class="title is-3">Matting</h2>
276
- <div class="columns is-centered">
277
- <div class="column content">
278
- <p>
279
- As a byproduct of our method, we can also solve the matting problem by ignoring
280
- samples that fall outside of a bounding box during rendering.
281
- </p>
282
- <video id="matting-video" controls playsinline height="100%">
283
- <source src="./static/videos/matting.mp4"
284
- type="video/mp4">
285
- </video>
286
  </div>
 
 
 
 
 
 
 
 
 
287
 
288
  </div>
289
  </div>
290
  </div>
291
- <!--/ Matting. -->
292
-
293
- <!-- Animation. -->
294
- <div class="columns is-centered">
295
- <div class="column is-full-width">
296
- <h2 class="title is-3">Animation</h2>
297
 
298
- <!-- Interpolating. -->
299
- <h3 class="title is-4">Interpolating states</h3>
 
 
 
300
  <div class="content has-text-justified">
301
  <p>
302
- We can also animate the scene by interpolating the deformation latent codes of two input
303
- frames. Use the slider here to linearly interpolate between the left frame and the right
304
- frame.
305
- </p>
306
- </div>
307
- <div class="columns is-vcentered interpolation-panel">
308
- <div class="column is-3 has-text-centered">
309
- <img src="./static/images/interpolate_start.jpg"
310
- class="interpolation-image"
311
- alt="Interpolate start reference image."/>
312
- <p>Start Frame</p>
313
- </div>
314
- <div class="column interpolation-video-column">
315
- <div id="interpolation-image-wrapper">
316
- Loading...
317
- </div>
318
- <input class="slider is-fullwidth is-large is-info"
319
- id="interpolation-slider"
320
- step="1" min="0" max="100" value="0" type="range">
321
  </div>
322
- <div class="column is-3 has-text-centered">
323
- <img src="./static/images/interpolate_end.jpg"
324
- class="interpolation-image"
325
- alt="Interpolation end reference image."/>
326
- <p class="is-bold">End Frame</p>
327
- </div>
328
- </div>
329
- <br/>
330
- <!--/ Interpolating. -->
331
-
332
- <!-- Re-rendering. -->
333
- <h3 class="title is-4">Re-rendering the input video</h3>
334
- <div class="content has-text-justified">
335
- <p>
336
- Using <span class="dnerf">Nerfies</span>, you can re-render a video from a novel
337
- viewpoint such as a stabilized camera by playing back the training deformations.
338
  </p>
339
  </div>
340
- <div class="content has-text-centered">
341
- <video id="replay-video"
342
- controls
343
- muted
344
- preload
345
- playsinline
346
- width="75%">
347
- <source src="./static/videos/replay.mp4"
348
- type="video/mp4">
349
- </video>
350
- </div>
351
- <!--/ Re-rendering. -->
352
-
353
- </div>
354
- </div>
355
- <!--/ Animation. -->
356
-
357
 
358
- <!-- Concurrent Work. -->
359
- <div class="columns is-centered">
360
- <div class="column is-full-width">
361
- <h2 class="title is-3">Related Links</h2>
362
 
 
 
 
 
 
363
  <div class="content has-text-justified">
364
  <p>
365
- There's a lot of excellent work that was introduced around the same time as ours.
366
- </p>
367
- <p>
368
- <a href="https://arxiv.org/abs/2104.09125" target="_blank">Progressive Encoding for Neural Optimization</a> introduces an idea similar to our windowed position encoding for coarse-to-fine optimization.
369
- </p>
370
- <p>
371
- <a href="https://www.albertpumarola.com/research/D-NeRF/index.html" target="_blank">D-NeRF</a> and <a href="https://gvv.mpi-inf.mpg.de/projects/nonrigid_nerf/" target="_blank">NR-NeRF</a>
372
- both use deformation fields to model non-rigid scenes.
373
- </p>
374
- <p>
375
- Some works model videos with a NeRF by directly modulating the density, such as <a href="https://video-nerf.github.io/" target="_blank">Video-NeRF</a>, <a href="https://www.cs.cornell.edu/~zl548/NSFF/" target="_blank">NSFF</a>, and <a href="https://neural-3d-video.github.io/" target="_blank">DyNeRF</a>
376
- </p>
377
- <p>
378
- There are probably many more by the time you are reading this. Check out <a href="https://dellaert.github.io/NeRF/" target="_blank">Frank Dellart's survey on recent NeRF papers</a>, and <a href="https://github.com/yenchenlin/awesome-NeRF" target="_blank">Yen-Chen Lin's curated list of NeRF papers</a>.
379
  </p>
380
  </div>
381
- </div>
382
- </div>
383
- <!--/ Concurrent Work. -->
384
-
385
  </div>
386
  </section>
387
 
388
-
389
- <section class="section" id="BibTeX">
390
  <div class="container is-max-desktop content">
391
  <h2 class="title">BibTeX</h2>
392
  <pre><code>@article{park2021nerfies,
393
  author = {Park, Keunhong and Sinha, Utkarsh and Barron, Jonathan T. and Bouaziz, Sofien and Goldman, Dan B and Seitz, Steven M. and Martin-Brualla, Ricardo},
394
  title = {Nerfies: Deformable Neural Radiance Fields},
395
- journal = {ICCV},
396
- year = {2021},
397
  }</code></pre>
398
  </div>
399
- </section>
400
-
401
-
402
- <footer class="footer">
403
- <div class="container">
404
- <div class="content has-text-centered">
405
- <a class="icon-link" target="_blank"
406
- href="./static/videos/nerfies_paper.pdf">
407
- <i class="fas fa-file-pdf"></i>
408
- </a>
409
- <a class="icon-link" href="https://github.com/keunhong" target="_blank" class="external-link" disabled>
410
- <i class="fab fa-github"></i>
411
- </a>
412
- </div>
413
- <div class="columns is-centered">
414
- <div class="column is-8">
415
- <div class="content">
416
- <p>
417
- This website is licensed under a <a rel="license" target="_blank"
418
- href="http://creativecommons.org/licenses/by-sa/4.0/">Creative
419
- Commons Attribution-ShareAlike 4.0 International License</a>.
420
- </p>
421
- <p>
422
- This means you are free to borrow the <a target="_blank"
423
- href="https://github.com/nerfies/nerfies.github.io">source code</a> of this website,
424
- we just ask that you link back to this page in the footer.
425
- Please remember to remove the analytics code included in the header of the website which
426
- you do not want on your website.
427
- </p>
428
- </div>
429
- </div>
430
- </div>
431
- </div>
432
- </footer>
433
-
434
- </body>
435
- </html>
 
3
  <head>
4
  <meta charset="utf-8">
5
  <meta name="description"
6
+ content="PRINCIPLES: Synthetic Strategy Memory for Proactive Dialogue Agents">
7
+ <meta name="keywords" content="Conversational Agents, Proactive Dialogue">
8
  <meta name="viewport" content="width=device-width, initial-scale=1">
9
+ <title>PRINCIPLES: Synthetic Strategy Memory for Proactive Dialogue Agents</title>
10
 
11
  <link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro"
12
  rel="stylesheet">
 
15
  <link rel="stylesheet" href="./static/css/bulma-carousel.min.css">
16
  <link rel="stylesheet" href="./static/css/bulma-slider.min.css">
17
  <link rel="stylesheet" href="./static/css/fontawesome.all.min.css">
18
+
19
  <link rel="stylesheet"
20
  href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css">
21
  <link rel="stylesheet" href="./static/css/index.css">
22
  <link rel="icon" href="./static/images/favicon.svg">
23
 
24
+ <!-- MathJax for LaTeX support -->
25
+ <script type="text/javascript" async
26
+ src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/MathJax.js?config=TeX-MML-AM_CHTML">
27
+ </script>
28
+ <script type="text/x-mathjax-config">
29
+ MathJax.Hub.Config({
30
+ tex2jax: {
31
+ inlineMath: [['$','$'], ['\\(','\\)']],
32
+ displayMath: [['$$','$$'], ['\\[','\\]']],
33
+ processEscapes: true
34
+ }
35
+ });
36
+ </script>
37
+
38
  <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
39
  <script defer src="./static/js/fontawesome.all.min.js"></script>
40
  <script src="./static/js/bulma-carousel.min.js"></script>
41
  <script src="./static/js/bulma-slider.min.js"></script>
42
  <script src="./static/js/index.js"></script>
43
+ <script>
44
+ function showStep(stepNumber) {
45
+ // Hide all step contents
46
+ var stepContents = document.querySelectorAll('.step-content');
47
+ for (var i = 0; i < stepContents.length; i++) {
48
+ stepContents[i].style.display = 'none';
49
+ }
50
+
51
+ // Remove active class from all tabs
52
+ var tabs = document.querySelectorAll('.tabs li');
53
+ for (var i = 0; i < tabs.length; i++) {
54
+ tabs[i].classList.remove('is-active');
55
+ }
56
+
57
+ // Show the selected step content and activate its tab
58
+ document.getElementById('step' + stepNumber + '-content').style.display = 'block';
59
+ document.getElementById('step' + stepNumber + '-tab').classList.add('is-active');
60
+ }
61
+
62
+ // Initialize when DOM is fully loaded
63
+ document.addEventListener('DOMContentLoaded', function() {
64
+ showStep(1);
65
+ });
66
+ </script>
67
  </head>
68
  <body>
69
 
 
72
  <div class="container is-max-desktop">
73
  <div class="columns is-centered">
74
  <div class="column has-text-centered">
75
+ <div style="display: flex; align-items: center; justify-content: center; margin-bottom: 20px;">
76
+ <h1 class="title is-1 publication-title" style="margin: 0 0 0 20px;"><img src="./static/images/principles.png" style="height: 60px; vertical-align: middle; margin-right: 5px; margin-bottom: 10px;">PRINCIPLES: Synthetic Strategy Memory for Proactive Dialogue Agents</h1>
77
+ </div>
78
  <div class="is-size-5 publication-authors">
79
  <span class="author-block">
80
+ <a href="#">Namyoung Kim,</a>
81
+ </span>
82
  <span class="author-block">
83
+ <a href="#">Kai Tzu-iunn Ong,</a>
84
+ </span>
85
  <span class="author-block">
86
+ <a href="#">Yeonjun Hwang</a>
87
  </span>
88
  <span class="author-block">
89
+ <a href="#">Minseok Kang</a>
90
  </span>
91
+ <br>
92
  <span class="author-block">
93
+ <a href="#">Iiseo Jihn,</a>
94
  </span>
95
  <span class="author-block">
96
+ <a href="#">Gayoung Kim,</a>
97
  </span>
98
  <span class="author-block">
99
+ <a href="#">Minju Kim,</a>
100
+ </span>
101
+ <span class="author-block">
102
+ <a href="#">Jinyoung Yeo</a>
103
  </span>
104
  </div>
105
 
106
  <div class="is-size-5 publication-authors">
107
+ <span class="author-block">Department of Artificial Intelligence, Yonsei University</span>
 
108
  </div>
109
 
110
  <div class="column has-text-centered">
111
  <div class="publication-links">
112
  <!-- PDF Link. -->
113
+ <!-- <span class="link-block">
114
  <a href="https://arxiv.org/pdf/2011.12948" target="_blank"
115
  class="external-link button is-normal is-rounded is-dark">
116
  <span class="icon">
 
118
  </span>
119
  <span>Paper</span>
120
  </a>
121
+ </span> -->
122
  <span class="link-block">
123
+ <a href="https://arxiv.org/abs/2509.17459" target="_blank"
124
  class="external-link button is-normal is-rounded is-dark">
125
  <span class="icon">
126
  <i class="ai ai-arxiv"></i>
 
128
  <span>arXiv</span>
129
  </a>
130
  </span>
131
+ <!-- Video Link.
132
  <span class="link-block">
133
  <a href="https://www.youtube.com/watch?v=MrKrnHhk8IA" target="_blank"
134
  class="external-link button is-normal is-rounded is-dark">
 
137
  </span>
138
  <span>Video</span>
139
  </a>
140
+ </span> -->
141
  <!-- Code Link. -->
142
  <span class="link-block">
143
+ <a href="https://github.com/kimnamssya/Principles" target="_blank"
144
  class="external-link button is-normal is-rounded is-dark">
145
  <span class="icon">
146
  <i class="fab fa-github"></i>
 
150
  </span>
151
  <!-- Dataset Link. -->
152
  <span class="link-block">
153
+ <a href="https://huggingface.co/datasets/LangAGI-Lab/P4GPlus" target="_blank"
154
  class="external-link button is-normal is-rounded is-dark">
155
  <span class="icon">
156
  <i class="far fa-images"></i>
 
166
  </div>
167
  </section>
168
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
169
 
170
  <section class="section">
171
  <div class="container is-max-desktop">
 
175
  <h2 class="title is-3">Abstract</h2>
176
  <div class="content has-text-justified">
177
  <p>
178
+ Dialogue agents based on large language models (LLMs) have shown promising performance in proactive dialogue, which requires effective strategy planning.
179
+ However, existing approaches to strategy planning for proactive dialogue face several limitations:
180
+ limited strategy coverage, preference bias in planning, and reliance on costly additional training.
181
+ To address these, we propose <img src="static/images/principles.png" style="height: 1em; vertical-align: middle;"><b>PRINCIPLES</b>: a synthetic strategy memory for proactive dialogue agents.
182
+ PRINCIPLES is derived through offline self-play simulations and serves as reusable knowledge that guides strategy planning during inference, eliminating the need for additional training and data annotation.
183
+ We evaluate PRINCIPLES in both emotional support and persuasion domains, demonstrating consistent improvements over strong baselines.
184
+ Furthermore, PRINCIPLES maintains its robustness across extended and more diverse evaluation settings.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185
  </p>
186
  </div>
187
  </div>
188
  </div>
189
  <!--/ Abstract. -->
 
 
 
 
 
 
 
 
 
 
 
 
190
  </div>
191
  </section>
192
 
193
 
194
+
195
  <section class="section">
196
  <div class="container is-max-desktop">
 
197
  <div class="columns is-centered">
 
 
198
  <div class="column">
199
+ <h2 class="title is-2">Methodology</h2>
200
+ <div class="tabs">
201
+ <ul>
202
+ <li id="step1-tab" class="is-active"><a href="#" onclick="showStep(1); return false;">Phase I: Principles Construction</a></li>
203
+ <li id="step2-tab"><a href="#" onclick="showStep(2); return false;">Phase II: Principles-driven Strategy Planning</a></li>
204
+ </ul>
205
+ </div>
206
+ <div id="step1-content" class="content step-content">
207
+ <div class="content has-text-centered">
208
+ <img src="static/images/main_1.png" alt="Principles Construction">
209
+ </div>
210
+ <h3 class="title is-4">Step I: Success and Failure Detection</h3>
211
+ <p>
212
+ At each turn \( t \), the agent and the user simulator generate their responses, and a critic model assigns a scalar reward \( r_t \).
213
+ We determine the <code>status</code> as either success or failure by evaluating whether the reward is higher than the previous turn:
214
+ \begin{equation}
215
+ \text{status}(s_t, a_t, u_t) =
216
+ \begin{cases}
217
+ \text{1} & \text{if } r_t > r_{t-1} \\
218
+ \text{0} & \text{otherwise}
219
+ \end{cases}
220
+ \end{equation}
221
+ </p>
222
+
223
+ <h3 class="title is-4">Step II: Strategy Revision</h3>
224
  <p>
225
+ Upon detecting a failure, the simulation invokes a revision step to refine the previously failed strategic decision.
226
+ It then generates a revised strategy \(\sigma_t^{\prime}\) to re-simulate from the failure point, leveraging prior failed attempts at turn \(t\). Formally, the revised strategy is generated as:
227
+ \begin{equation}
228
+ \sigma_t^{\prime} = \texttt{LLM}_{\theta}(\rho_{r}; s_t, \mathcal{F}_t)
229
+ \end{equation}
230
+ where \(\rho_{r}\) is the revision prompt and \(\mathcal{F}_t\) denotes the set of previously failed trials at turn \(t\), defined as \( \mathcal{F}_t = \{ (\sigma_t^{1}, a_t^{1}, u_t^{1}), \dots, (\sigma_t^{n}, a_t^{n}, u_t^{n}) \} \)
231
+ where \(n\) is the maximum number of failed attempts. This failure history guides the model to avoid previously ineffective strategies.
232
+ </p>
233
+
234
+ <h3 class="title is-4">Step III: Re-simulation via Backtracking</h3>
235
+ <p>
236
+ After generating a revised strategy \(\sigma_t^{\prime}\), the simulation backtracks to the original state \(s_t\) preceding the failure and re-simulates turn \(t\) using \(\sigma_t^{\prime}\). The agent generates a revised response \(a_t^{\prime}\), and the user simulator produces a new reply \(u_t^{\prime}\) based on the updated context.
237
+ \begin{equation}
238
+ a_t^{\prime} = \texttt{LLM}_{\theta}(\rho_{a}; s_t, \sigma_t^{\prime})
239
+ \end{equation}
240
+ \begin{equation}
241
+ u_t^{\prime} = \texttt{LLM}_{\theta}(\rho_{u}; s_t, a_t^{\prime})
242
+ \end{equation}
243
+
244
+ </p>
245
+ <h3 class="title is-4">Step IV: Principle Derivation</h3>
246
+ <p>
247
+ If the corrected turn is re-evaluated as successful (<code>status</code> == 1), indicating a transition from failure to success, we derive a principle \( \tilde{p_t} \) as a result of overcoming the failure:
248
+ \begin{equation}
249
+ \tilde{p_t} = \texttt{LLM}_{\theta}(\rho_{\psi}; s_{t}, \mathcal{T}_t^{*}, \mathcal{F}_{t})
250
+ \end{equation}
251
+ where \(\rho_{\psi}\) is a prompt designed to extract a principle from failure, and the successful revised interaction is denoted as \(\mathcal{T}_t^{*} = (\sigma_t^{*}, a_t^{*}, u_t^{*})\). The extracted principle is then added to the principle set \(\mathcal{P}\):
252
+ \begin{equation}
253
+ \mathcal{P} \leftarrow \mathcal{P} \cup \{ \tilde{p_t} \}
254
+ \end{equation}
255
+
256
  </p>
 
 
 
 
257
  </div>
 
 
258
 
259
+ <div id="step2-content" class="content step-content" style="display: none;">
260
+ <div class="content has-text-centered">
261
+ <img src="static/images/main_2.png" alt="Principles-driven Strategy Planning">
 
 
 
 
 
 
 
 
 
 
262
  </div>
263
+ <p>
264
+ To apply the extracted PRINCIPLES at inference time, we first identify candidate principles that closely match the current context. Since the <code>When</code> clause captures the core situation, we retrieve relevant top-\(k\) principles by comparing the current state \(s_t\) and the <code>When</code> clause using L2 distance between embedding vectors.
265
+ Only the <code>When</code> component of each principle is used to compute similarity, allowing us to identify contextually analogous dialogue situations across diverse scenarios. We denote the set of top-\(k\) retrieved principles as \( \Sigma_t = \{\sigma_1, \dots, \sigma_k\} \subset \mathcal{P} \). Since even within the same domain, retrieved principles may not directly align with the dialogue context, we perform a reinterpretation step. Formally, the reinterpreted principles \(\tilde{\Sigma}_t\) are generated as:
266
+ \begin{equation}
267
+ \tilde{\Sigma}_t = \texttt{LLM}_{\theta}(\rho_{\nu}; s_t, \Sigma_t)
268
+ \end{equation}
269
+ where \(\rho_{\nu}\) is a reinterpretation prompt designed to adapt retrieved principles \(\Sigma_t\) to the current context. This aligns each principle with the context.
270
+ </p>
271
+
272
 
273
  </div>
274
  </div>
275
  </div>
276
+ </div>
277
+ </section>
 
 
 
 
278
 
279
+ <section class="section">
280
+ <div class="container is-max-desktop">
281
+ <!-- <div class="columns is-centered has-text-centered"> -->
282
+ <!-- <div class="column is-four-fifths"> -->
283
+ <h2 class="title is-2">Qualitative Example</h2>
284
  <div class="content has-text-justified">
285
  <p>
286
+ A qualitative example comparing AnE, PPDPP, and our approach based on PRINCIPLES. Our
287
+ approach tended to combine logical coherence and emotional empathy (i.e., Balanced Support).
288
+ <div class="content has-text-centered">
289
+ <img src="static/images/figure_case.png" alt="Qualitative example" style="width: 100%;">
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
290
  </div>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
291
  </p>
292
  </div>
293
+ <!-- </div> -->
294
+ <!-- </div> -->
295
+ <!--/ Abstract. -->
296
+ </div>
297
+ </section>
 
 
 
 
 
 
 
 
 
 
 
 
298
 
 
 
 
 
299
 
300
+ <section class="section">
301
+ <div class="container is-max-desktop">
302
+ <!-- <div class="columns is-centered has-text-centered"> -->
303
+ <!-- <div class="column is-four-fifths"> -->
304
+ <h2 class="title is-2">Main Results</h2>
305
  <div class="content has-text-justified">
306
  <p>
307
+ We investigate our method’s effectiveness in addressing three key challenges in strategy planning: coverage, bias, and training. For more details, please refer to our paper.
308
+ <div class="content has-text-centered">
309
+ <img src="static/images/main_results.png" alt="Main results" style="width: 100%;">
310
+ </div>
 
 
 
 
 
 
 
 
 
 
311
  </p>
312
  </div>
313
+ <!-- </div> -->
314
+ <!-- </div> -->
315
+ <!--/ Abstract. -->
 
316
  </div>
317
  </section>
318
 
319
+ <!-- <section class="section" id="BibTeX">
 
320
  <div class="container is-max-desktop content">
321
  <h2 class="title">BibTeX</h2>
322
  <pre><code>@article{park2021nerfies,
323
  author = {Park, Keunhong and Sinha, Utkarsh and Barron, Jonathan T. and Bouaziz, Sofien and Goldman, Dan B and Seitz, Steven M. and Martin-Brualla, Ricardo},
324
  title = {Nerfies: Deformable Neural Radiance Fields},
325
+ journal = {<EMNLP>},
326
+ year = {2025},
327
  }</code></pre>
328
  </div>
329
+ </section> -->
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
static/{videos/blueshirt.mp4 → images/figure_case.png} RENAMED
File without changes
static/images/interpolate_end.jpg DELETED
Binary file (113 kB)
 
static/images/interpolate_start.jpg DELETED
Binary file (117 kB)
 
static/{videos/chair-tp.mp4 → images/main_1.png} RENAMED
File without changes
static/{videos/coffee.mp4 → images/main_1_.png} RENAMED
File without changes
static/{videos/dollyzoom-depth.mp4 → images/main_2.png} RENAMED
File without changes
static/images/main_2_.png ADDED

Git LFS Details

  • SHA256: ec0d4bc764e4449f7654e1995a9a9d21101d7ff21c26007fe4d8a520b4b181ff
  • Pointer size: 132 Bytes
  • Size of remote file: 1.66 MB
static/images/main_results.png ADDED

Git LFS Details

  • SHA256: 29fdcebf6f8dc1a884d84dd7f44d7c662c80f248e55456afcdd6d3f66ae35a66
  • Pointer size: 131 Bytes
  • Size of remote file: 596 kB
static/images/principles.png ADDED

Git LFS Details

  • SHA256: 78c1e642411817d7b05d4b2eb75eee9ea87cc6875ed518b17e7a8b1a72f356ea
  • Pointer size: 131 Bytes
  • Size of remote file: 339 kB
static/interpolation/stacked/000000.jpg DELETED
Binary file (128 kB)
 
static/interpolation/stacked/000001.jpg DELETED
Binary file (128 kB)
 
static/interpolation/stacked/000002.jpg DELETED
Binary file (128 kB)
 
static/interpolation/stacked/000003.jpg DELETED
Binary file (128 kB)
 
static/interpolation/stacked/000004.jpg DELETED
Binary file (128 kB)
 
static/interpolation/stacked/000005.jpg DELETED
Binary file (129 kB)
 
static/interpolation/stacked/000006.jpg DELETED
Binary file (129 kB)
 
static/interpolation/stacked/000007.jpg DELETED
Binary file (129 kB)
 
static/interpolation/stacked/000008.jpg DELETED
Binary file (129 kB)
 
static/interpolation/stacked/000009.jpg DELETED
Binary file (129 kB)
 
static/interpolation/stacked/000010.jpg DELETED
Binary file (129 kB)
 
static/interpolation/stacked/000011.jpg DELETED
Binary file (129 kB)
 
static/interpolation/stacked/000012.jpg DELETED
Binary file (130 kB)
 
static/interpolation/stacked/000013.jpg DELETED
Binary file (130 kB)
 
static/interpolation/stacked/000014.jpg DELETED
Binary file (130 kB)
 
static/interpolation/stacked/000015.jpg DELETED
Binary file (130 kB)
 
static/interpolation/stacked/000016.jpg DELETED
Binary file (130 kB)
 
static/interpolation/stacked/000017.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000018.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000019.jpg DELETED
Binary file (130 kB)
 
static/interpolation/stacked/000020.jpg DELETED
Binary file (130 kB)
 
static/interpolation/stacked/000021.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000022.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000023.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000024.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000025.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000026.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000027.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000028.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000029.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000030.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000031.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000032.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000033.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000034.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000035.jpg DELETED
Binary file (131 kB)
 
static/interpolation/stacked/000036.jpg DELETED
Binary file (132 kB)
 
static/interpolation/stacked/000037.jpg DELETED
Binary file (132 kB)