banao-tech commited on
Commit
feed11f
·
verified ·
1 Parent(s): 34de7da

Update frontend/app.js

Browse files
Files changed (1) hide show
  1. frontend/app.js +104 -31
frontend/app.js CHANGED
@@ -1,62 +1,135 @@
1
  import * as THREE from "three";
2
  import { GLTFLoader } from "three/addons/loaders/GLTFLoader.js";
3
 
4
- let mesh, timeline = [];
5
- let audio, startTime;
 
 
6
 
 
7
  const scene = new THREE.Scene();
8
- const cam = new THREE.PerspectiveCamera(40, 1, 0.1, 10);
9
- cam.position.z = 1;
10
 
 
 
 
 
 
11
  const renderer = new THREE.WebGLRenderer({ antialias: true });
12
- renderer.setSize(500, 500);
 
13
  document.body.appendChild(renderer.domElement);
14
 
15
- scene.add(new THREE.AmbientLight(0xffffff, 1));
 
 
 
 
 
 
 
 
16
 
17
- new GLTFLoader().load("facecap.glb", g => {
18
- scene.add(g.scene);
19
- g.scene.traverse(o => {
20
- if (o.morphTargetDictionary) mesh = o;
 
 
21
  });
 
 
 
 
 
22
  });
23
 
24
- function applyViseme(v) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  if (!mesh) return;
26
- mesh.morphTargetInfluences.fill(0);
27
- const map = {
28
- A: "jawOpen",
29
- E: "mouthSmile_L",
30
- O: "mouthFunnel",
31
- BMP: "mouthClose",
32
- FV: "mouthPucker"
33
- };
34
- const k = map[v];
35
- if (k && mesh.morphTargetDictionary[k] !== undefined)
36
- mesh.morphTargetInfluences[mesh.morphTargetDictionary[k]] = 1;
 
 
 
37
  }
38
 
39
- document.getElementById("speak").onclick = async () => {
 
 
 
40
  const res = await fetch("/speak", {
41
  method: "POST",
42
- headers: {"Content-Type":"application/json"},
43
- body: JSON.stringify({ text: text.value })
44
  });
 
45
  const data = await res.json();
46
- timeline = data.timeline;
47
 
 
48
  audio = new Audio(data.audio);
49
  await audio.play();
 
50
  startTime = performance.now() / 1000;
51
  };
52
 
 
 
 
53
  function animate() {
54
  requestAnimationFrame(animate);
55
- if (audio && !audio.paused) {
56
- const t = performance.now()/1000 - startTime;
57
- const cur = timeline.findLast(x => x.t <= t);
58
- if (cur) applyViseme(cur.v);
 
 
 
 
 
 
 
 
59
  }
60
- renderer.render(scene, cam);
 
 
 
 
 
 
 
61
  }
 
62
  animate();
 
 
 
 
 
 
 
 
 
1
  import * as THREE from "three";
2
  import { GLTFLoader } from "three/addons/loaders/GLTFLoader.js";
3
 
4
// Mutable app state shared by the model loader, the speak handler,
// and the render loop below.
let mesh = null;      // mesh carrying morphTargetDictionary/Influences, set when the GLTF loads
let timeline = [];    // viseme timeline entries shaped { t, v } from the /speak response
let audio = null;     // currently playing utterance (HTMLAudioElement)
let startTime = 0;    // playback start in seconds (performance.now() / 1000)
8
 
9
// ---------------- SCENE ----------------
const scene = new THREE.Scene();
scene.background = new THREE.Color(0x111111);

// Camera sits slightly above the origin, looking at the face from the front.
const camera = new THREE.PerspectiveCamera(35, 1, 0.1, 10);
camera.position.set(0, 0.05, 0.75);

// Square antialiased WebGL canvas, appended straight into the page body.
const renderer = new THREE.WebGLRenderer({ antialias: true });
renderer.setSize(520, 520);
renderer.setPixelRatio(window.devicePixelRatio);
document.body.appendChild(renderer.domElement);

// Soft ambient fill plus a single directional key light from above/front.
scene.add(new THREE.AmbientLight(0xffffff, 0.8));
const keyLight = new THREE.DirectionalLight(0xffffff, 0.6);
keyLight.position.set(0, 1, 1);
scene.add(keyLight);
28
+
29
// ---------------- LOAD MODEL ----------------
// Load the face model, remember whichever node carries morph targets,
// and recentre the whole model on the origin.
new GLTFLoader().load("facecap.glb", (gltf) => {
  scene.add(gltf.scene);

  gltf.scene.traverse((obj) => {
    if (!obj.morphTargetDictionary) return;
    mesh = obj;
    console.log("FOUND FACE:", obj.name);
    console.log("MORPHS:", obj.morphTargetDictionary);
  });

  // Shift the model so its bounding-box centre sits at the origin.
  const bounds = new THREE.Box3().setFromObject(gltf.scene);
  gltf.scene.position.sub(bounds.getCenter(new THREE.Vector3()));
});
46
 
47
// ---------------- UI ----------------
// Page controls: the text field to read from and the button that triggers TTS.
const textInput = document.getElementById("text");
const speakBtn = document.getElementById("speak");
50
+
51
// ---------------- VISEMES ----------------
// Maps a viseme label from the /speak timeline to the name of the morph
// target it drives. "neutral" maps to null, meaning "no target — just let
// the current influences decay". Frozen so nothing can remap a viseme at
// runtime: this is shared lookup data, not state.
const VISEME_MAP = Object.freeze({
  A: "jawOpen",
  E: "mouthSmile_L",
  O: "mouthFunnel",
  BMP: "mouthClose",
  FV: "mouthPucker",
  neutral: null,
});
60
+
61
/**
 * Blend the face toward one viseme while easing every other morph target out.
 *
 * Every call first decays ALL current influences by 25%, then (if `name`
 * resolves to a morph target via VISEME_MAP) pushes that one target up by
 * 0.6 * strength, capped at 1. Calling with a falsy/unknown name therefore
 * just relaxes the face toward neutral. No-op until the mesh has loaded.
 *
 * @param {string|null} name - viseme label (key of VISEME_MAP)
 * @param {number} [strength=1] - scale for how hard the target is pushed
 */
function applyVisemeSmooth(name, strength = 1) {
  if (!mesh) return;

  const influences = mesh.morphTargetInfluences;
  const dict = mesh.morphTargetDictionary;

  // Exponential ease-out: every influence loses a quarter per call.
  for (let i = influences.length - 1; i >= 0; i--) {
    influences[i] *= 0.75;
  }

  if (!name) return;

  const targetName = VISEME_MAP[name];
  if (!targetName) return;

  const idx = dict[targetName];
  if (idx === undefined) return;

  influences[idx] = Math.min(1, influences[idx] + 0.6 * strength);
}
78
 
79
// ---------------- SPEAK ----------------
// POST the textarea contents to the backend, then play the returned audio
// and arm the viseme timeline for the render loop. The handler is async on
// a click event, so every failure path is caught here — otherwise a fetch
// or autoplay rejection becomes an unhandled promise rejection.
speakBtn.onclick = async () => {
  const text = textInput.value;
  if (!text.trim()) return;

  try {
    const res = await fetch("/speak", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ text }),
    });
    // Surface backend failures instead of trying to parse an error page as JSON.
    if (!res.ok) throw new Error(`/speak failed: ${res.status}`);

    const data = await res.json();
    timeline = data.timeline || [];

    // Stop any previous utterance before starting the new one.
    if (audio) audio.pause();
    audio = new Audio(data.audio);
    await audio.play();

    // Clock for the lip-sync loop (seconds), started once playback begins.
    startTime = performance.now() / 1000;
  } catch (err) {
    console.error("speak request failed:", err);
  }
};
98
 
99
// ---------------- ANIMATION LOOP ----------------
let t = 0; // idle-motion clock, advanced ~0.03 per rendered frame

function animate() {
  requestAnimationFrame(animate);

  // Subtle idle sway so the face never looks frozen.
  if (mesh) {
    mesh.rotation.y = Math.sin(t * 0.3) * 0.015;
    mesh.rotation.x = Math.sin(t * 0.2) * 0.01;
  }

  // While audio is playing, drive the mouth from the viseme timeline:
  // use the latest entry whose timestamp has already elapsed.
  if (audio && !audio.paused && timeline.length) {
    const elapsed = performance.now() / 1000 - startTime;
    const current = timeline.findLast((entry) => entry.t <= elapsed);
    if (current) applyVisemeSmooth(current.v);
  }

  // Once the utterance has finished, settle into a faint smile.
  if (audio && audio.ended && mesh) applyVisemeSmooth("E", 0.2);

  renderer.render(scene, camera);
  t += 0.03;
}

animate();
128
+
129
// ---------------- RESIZE ----------------
// Keep the canvas a square sized to the smaller window dimension (minus a
// 40px margin). Aspect stays 1 because the canvas stays square.
window.addEventListener("resize", () => {
  // Clamp: on very small windows `min(w, h) - 40` can go non-positive,
  // which would hand WebGLRenderer.setSize an invalid drawing-buffer size.
  const size = Math.max(1, Math.min(window.innerWidth, window.innerHeight) - 40);
  renderer.setSize(size, size);
  camera.aspect = 1;
  camera.updateProjectionMatrix();
});