Bnichols413 committed on
Commit
6172dec
·
verified ·
1 Parent(s): 3bcd939

fix the speech and text buttons. Got it — the right way to do that is to build it so it can **run in your browser for testing** and then **export as an installable Progressive Web App (PWA)** for when you’re ready to carry it with you on your phone or work tablet.

Browse files

Here’s how we’ll structure it so it’s functional right now and portable later.

---

## **Brian’s TechnoBuddy: Structure**

### **1. index.html**

This will hold the layout and visuals:

* full-screen starfield background (Three.js)
* transparent glass console window for your conversations
* a middle canvas for your particle-silhouette avatar
* input box + mic button (speech or typing)

### **2. style.css**

* dark-galaxy gradient fallback for when WebGL is loading
* gold-blue glow accents (#FFD400 and #0948B3)
* smooth glass-blur effects so it feels like a cockpit console

### **3. script.js**

* connects to your OpenAI key for text + audio
* handles whisper transcription from recorded voice
* displays system responses in the console
* animates the elemental avatar states (fire, ice, lightning, water, plasma)
* saves local “tasks” and “XP” in `localStorage` so it can track you day to day

### **4. manifest.json**

* required for PWA install (so you can “Add to Home Screen” and run offline)
* contains name, icon, theme color

### **5. service-worker.js**

* caches assets for offline use
* keeps your TechnoBuddy usable even without connection

---

## **How It’ll Work**

1. You talk or type → Whisper endpoint turns speech into text.
2. The text goes to GPT → it replies like your co-technician, with the problem-solving mindset you’ve trained it for.
3. The avatar pulses and morphs according to emotion / energy of the conversation.
4. The response is spoken back to you using Web Speech API (no extra cost).
5. Your activity log updates automatically.

---

## **Deployment**

1. In DeepSite: create a new project.
2. Add the five files above.
3. Paste the code I’ll give you for each file.
4. Click **Run** → You’ll see the live galaxy background and TechnoBuddy online.
5. Once it’s solid, I’ll show you how to generate the PWA manifest so you can install it directly to your phone.

---

If you’re ready, I’ll generate the **exact HTML / CSS / JS** for the working browser version first (it’ll use your OpenAI key for both chat and transcription).
Confirm you’re ready for that code drop, and I’ll give you the full copy-paste build.

Files changed (5) hide show
  1. index.html +19 -5
  2. manifest.json +23 -0
  3. script.js +65 -1
  4. service-worker.js +26 -0
  5. style.css +14 -0
index.html CHANGED
@@ -51,7 +51,6 @@
51
  <p class="mt-4 text-blue-400">> System Ready. Speak your mind.</p>
52
  </div>
53
  </div>
54
-
55
  <!-- Input Area -->
56
  <div class="bg-gray-900/50 rounded-lg p-1 border border-gray-700 flex">
57
  <input
@@ -60,14 +59,16 @@
60
  placeholder="Type or speak your thoughts..."
61
  class="flex-1 bg-transparent outline-none px-4 py-3 text-gray-200 placeholder-gray-500"
62
  >
63
- <button id="voice-btn" class="p-3 text-gray-400 hover:text-blue-400 transition-colors">
64
  <i data-feather="mic"></i>
 
65
  </button>
66
- <button id="send-btn" class="p-3 bg-blue-600 hover:bg-blue-500 rounded-lg ml-2 transition-colors">
67
  <i data-feather="send"></i>
68
  </button>
69
  </div>
70
- </div>
 
71
  </div>
72
 
73
  <!-- XP Tracker -->
@@ -118,6 +119,19 @@
118
  galaxy.init();
119
  });
120
  </script>
121
- <script src="https://huggingface.co/deepsite/deepsite-badge.js"></script>
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  </body>
123
  </html>
 
51
  <p class="mt-4 text-blue-400">> System Ready. Speak your mind.</p>
52
  </div>
53
  </div>
 
54
  <!-- Input Area -->
55
  <div class="bg-gray-900/50 rounded-lg p-1 border border-gray-700 flex">
56
  <input
 
59
  placeholder="Type or speak your thoughts..."
60
  class="flex-1 bg-transparent outline-none px-4 py-3 text-gray-200 placeholder-gray-500"
61
  >
62
+ <button id="voice-btn" class="p-3 relative text-gray-400 hover:text-blue-400 transition-colors">
63
  <i data-feather="mic"></i>
64
+ <span id="recording-indicator" class="hidden absolute top-0 right-0 w-2.5 h-2.5 bg-red-500 rounded-full animate-pulse"></span>
65
  </button>
66
+ <button id="send-btn" class="p-3 bg-blue-600 hover:bg-blue-500 rounded-lg ml-2 transition-colors disabled:opacity-50 disabled:cursor-not-allowed" disabled>
67
  <i data-feather="send"></i>
68
  </button>
69
  </div>
70
+ <div id="voice-feedback" class="text-xs text-blue-400 font-mono hidden mt-1">Listening...</div>
71
+ </div>
72
  </div>
73
 
74
  <!-- XP Tracker -->
 
119
  galaxy.init();
120
  });
121
  </script>
122
+ <script>
123
+ // Register service worker
124
+ if ('serviceWorker' in navigator) {
125
+ window.addEventListener('load', () => {
126
+ navigator.serviceWorker.register('/service-worker.js')
127
+ .then(registration => {
128
+ console.log('ServiceWorker registration successful');
129
+ })
130
+ .catch(err => {
131
+ console.log('ServiceWorker registration failed: ', err);
132
+ });
133
+ });
134
+ }
135
+ </script>
136
  </body>
137
  </html>
manifest.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
{
  "name": "MindMirror Galaxy Interface",
  "short_name": "MindMirror",
  "description": "Brian's Clarity Companion interface",
  "start_url": "/",
  "display": "standalone",
  "background_color": "#111827",
  "theme_color": "#1E40AF",
  "icons": [
    {
      "src": "/static/icon-192.png",
      "type": "image/png",
      "sizes": "192x192"
    },
    {
      "src": "/static/icon-512.png",
      "type": "image/png",
      "sizes": "512x512"
    }
  ]
}
script.js CHANGED
@@ -1,4 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  class GalaxyBackground {
2
- constructor(container) {
3
  this.container = container;
4
  this.scene = null;
 
1
+
2
+ // Speech recognition setup
3
+ const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
4
+ let recognition = null;
5
+ let isRecognizing = false;
6
+
7
+ if (SpeechRecognition) {
8
+ recognition = new SpeechRecognition();
9
+ recognition.continuous = false;
10
+ recognition.interimResults = false;
11
+ recognition.lang = 'en-US';
12
+
13
+ recognition.onresult = (event) => {
14
+ const transcript = event.results[0][0].transcript;
15
+ document.getElementById('user-input').value = transcript;
16
+ document.getElementById('voice-feedback').classList.add('hidden');
17
+ document.getElementById('recording-indicator').classList.add('hidden');
18
+ document.getElementById('send-btn').disabled = false;
19
+ };
20
+
21
+ recognition.onerror = (event) => {
22
+ console.error('Speech recognition error', event.error);
23
+ document.getElementById('voice-feedback').textContent = `Error: ${event.error}`;
24
+ setTimeout(() => {
25
+ document.getElementById('voice-feedback').classList.add('hidden');
26
+ }, 3000);
27
+ document.getElementById('recording-indicator').classList.add('hidden');
28
+ };
29
+
30
+ recognition.onend = () => {
31
+ isRecognizing = false;
32
+ document.getElementById('voice-btn').classList.remove('text-red-400');
33
+ };
34
+ }
35
+
36
+ document.addEventListener('DOMContentLoaded', function() {
37
+ const voiceBtn = document.getElementById('voice-btn');
38
+ const sendBtn = document.getElementById('send-btn');
39
+ const userInput = document.getElementById('user-input');
40
+
41
+ // Voice input handler
42
+ voiceBtn.addEventListener('click', () => {
43
+ if (!recognition) {
44
+ alert('Speech recognition not supported in your browser');
45
+ return;
46
+ }
47
+
48
+ if (!isRecognizing) {
49
+ recognition.start();
50
+ isRecognizing = true;
51
+ document.getElementById('voice-feedback').classList.remove('hidden');
52
+ document.getElementById('recording-indicator').classList.remove('hidden');
53
+ document.getElementById('voice-btn').classList.add('text-red-400');
54
+ } else {
55
+ recognition.stop();
56
+ }
57
+ });
58
+
59
+ // Text input handler
60
+ userInput.addEventListener('input', () => {
61
+ sendBtn.disabled = userInput.value.trim() === '';
62
+ });
63
+ });
64
+
65
  class GalaxyBackground {
66
+ constructor(container) {
67
  this.container = container;
68
  this.scene = null;
service-worker.js ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ const CACHE_NAME = 'mindmirror-v1';
2
+ const ASSETS = [
3
+ '/',
4
+ '/index.html',
5
+ '/style.css',
6
+ '/script.js',
7
+ '/static/icon-192.png',
8
+ '/static/icon-512.png',
9
+ 'https://cdn.tailwindcss.com',
10
+ 'https://unpkg.com/feather-icons',
11
+ 'https://cdn.jsdelivr.net/npm/feather-icons/dist/feather.min.js'
12
+ ];
13
+
14
+ self.addEventListener('install', (event) => {
15
+ event.waitUntil(
16
+ caches.open(CACHE_NAME)
17
+ .then((cache) => cache.addAll(ASSETS))
18
+ );
19
+ });
20
+
21
+ self.addEventListener('fetch', (event) => {
22
+ event.respondWith(
23
+ caches.match(event.request)
24
+ .then((response) => response || fetch(event.request))
25
+ );
26
+ });
style.css CHANGED
@@ -49,6 +49,20 @@ body {
49
  .glow-blue:hover {
50
  box-shadow: 0 0 20px rgba(59, 130, 246, 0.7);
51
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
 
53
  /* Responsive adjustments */
54
  @media (max-width: 768px) {
 
49
  .glow-blue:hover {
50
  box-shadow: 0 0 20px rgba(59, 130, 246, 0.7);
51
  }
52
+ /* PWA and speech UI enhancements */
53
+ @media all and (display-mode: standalone) {
54
+ body {
55
+ overscroll-behavior-y: contain;
56
+ }
57
+ }
58
+
59
+ #user-input:focus {
60
+ box-shadow: 0 0 0 2px rgba(96, 165, 250, 0.5);
61
+ }
62
+
63
+ #send-btn:not(:disabled) {
64
+ box-shadow: 0 0 12px rgba(59, 130, 246, 0.6);
65
+ }
66
 
67
  /* Responsive adjustments */
68
  @media (max-width: 768px) {