remiai3 commited on
Commit
b12fedd
·
verified ·
1 Parent(s): 016d364

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +39 -60
  2. .hintrc +15 -0
  3. README.md +168 -0
  4. assets/icons/globe.svg +15 -0
  5. assets/icons/message-circle.svg +13 -0
  6. bin/ffmpeg.exe +3 -0
  7. bin/ffplay.exe +3 -0
  8. document.md +132 -0
  9. engine/cpu_avx/SDL2.dll +3 -0
  10. engine/cpu_avx/api.py +98 -0
  11. engine/cpu_avx/bujji_engine.exe +3 -0
  12. engine/cpu_avx/espeak-ng-data/af_dict +3 -0
  13. engine/cpu_avx/espeak-ng-data/am_dict +0 -0
  14. engine/cpu_avx/espeak-ng-data/an_dict +0 -0
  15. engine/cpu_avx/espeak-ng-data/ar_dict +3 -0
  16. engine/cpu_avx/espeak-ng-data/as_dict +0 -0
  17. engine/cpu_avx/espeak-ng-data/az_dict +0 -0
  18. engine/cpu_avx/espeak-ng-data/ba_dict +0 -0
  19. engine/cpu_avx/espeak-ng-data/be_dict +0 -0
  20. engine/cpu_avx/espeak-ng-data/bg_dict +0 -0
  21. engine/cpu_avx/espeak-ng-data/bn_dict +0 -0
  22. engine/cpu_avx/espeak-ng-data/bpy_dict +0 -0
  23. engine/cpu_avx/espeak-ng-data/bs_dict +0 -0
  24. engine/cpu_avx/espeak-ng-data/ca_dict +0 -0
  25. engine/cpu_avx/espeak-ng-data/chr_dict +0 -0
  26. engine/cpu_avx/espeak-ng-data/cmn_dict +3 -0
  27. engine/cpu_avx/espeak-ng-data/cs_dict +0 -0
  28. engine/cpu_avx/espeak-ng-data/cv_dict +0 -0
  29. engine/cpu_avx/espeak-ng-data/cy_dict +0 -0
  30. engine/cpu_avx/espeak-ng-data/da_dict +3 -0
  31. engine/cpu_avx/espeak-ng-data/de_dict +0 -0
  32. engine/cpu_avx/espeak-ng-data/el_dict +0 -0
  33. engine/cpu_avx/espeak-ng-data/en_dict +3 -0
  34. engine/cpu_avx/espeak-ng-data/eo_dict +0 -0
  35. engine/cpu_avx/espeak-ng-data/es_dict +0 -0
  36. engine/cpu_avx/espeak-ng-data/et_dict +0 -0
  37. engine/cpu_avx/espeak-ng-data/eu_dict +0 -0
  38. engine/cpu_avx/espeak-ng-data/fa_dict +3 -0
  39. engine/cpu_avx/espeak-ng-data/fi_dict +0 -0
  40. engine/cpu_avx/espeak-ng-data/fr_dict +0 -0
  41. engine/cpu_avx/espeak-ng-data/ga_dict +0 -0
  42. engine/cpu_avx/espeak-ng-data/gd_dict +0 -0
  43. engine/cpu_avx/espeak-ng-data/gn_dict +0 -0
  44. engine/cpu_avx/espeak-ng-data/grc_dict +0 -0
  45. engine/cpu_avx/espeak-ng-data/gu_dict +0 -0
  46. engine/cpu_avx/espeak-ng-data/hak_dict +0 -0
  47. engine/cpu_avx/espeak-ng-data/haw_dict +0 -0
  48. engine/cpu_avx/espeak-ng-data/he_dict +0 -0
  49. engine/cpu_avx/espeak-ng-data/hi_dict +0 -0
  50. engine/cpu_avx/espeak-ng-data/hr_dict +0 -0
.gitattributes CHANGED
@@ -1,60 +1,39 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.avro filter=lfs diff=lfs merge=lfs -text
4
- *.bin filter=lfs diff=lfs merge=lfs -text
5
- *.bz2 filter=lfs diff=lfs merge=lfs -text
6
- *.ckpt filter=lfs diff=lfs merge=lfs -text
7
- *.ftz filter=lfs diff=lfs merge=lfs -text
8
- *.gz filter=lfs diff=lfs merge=lfs -text
9
- *.h5 filter=lfs diff=lfs merge=lfs -text
10
- *.joblib filter=lfs diff=lfs merge=lfs -text
11
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
12
- *.lz4 filter=lfs diff=lfs merge=lfs -text
13
- *.mds filter=lfs diff=lfs merge=lfs -text
14
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
15
- *.model filter=lfs diff=lfs merge=lfs -text
16
- *.msgpack filter=lfs diff=lfs merge=lfs -text
17
- *.npy filter=lfs diff=lfs merge=lfs -text
18
- *.npz filter=lfs diff=lfs merge=lfs -text
19
- *.onnx filter=lfs diff=lfs merge=lfs -text
20
- *.ot filter=lfs diff=lfs merge=lfs -text
21
- *.parquet filter=lfs diff=lfs merge=lfs -text
22
- *.pb filter=lfs diff=lfs merge=lfs -text
23
- *.pickle filter=lfs diff=lfs merge=lfs -text
24
- *.pkl filter=lfs diff=lfs merge=lfs -text
25
- *.pt filter=lfs diff=lfs merge=lfs -text
26
- *.pth filter=lfs diff=lfs merge=lfs -text
27
- *.rar filter=lfs diff=lfs merge=lfs -text
28
- *.safetensors filter=lfs diff=lfs merge=lfs -text
29
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
30
- *.tar.* filter=lfs diff=lfs merge=lfs -text
31
- *.tar filter=lfs diff=lfs merge=lfs -text
32
- *.tflite filter=lfs diff=lfs merge=lfs -text
33
- *.tgz filter=lfs diff=lfs merge=lfs -text
34
- *.wasm filter=lfs diff=lfs merge=lfs -text
35
- *.xz filter=lfs diff=lfs merge=lfs -text
36
- *.zip filter=lfs diff=lfs merge=lfs -text
37
- *.zst filter=lfs diff=lfs merge=lfs -text
38
- *tfevents* filter=lfs diff=lfs merge=lfs -text
39
- # Audio files - uncompressed
40
- *.pcm filter=lfs diff=lfs merge=lfs -text
41
- *.sam filter=lfs diff=lfs merge=lfs -text
42
- *.raw filter=lfs diff=lfs merge=lfs -text
43
- # Audio files - compressed
44
- *.aac filter=lfs diff=lfs merge=lfs -text
45
- *.flac filter=lfs diff=lfs merge=lfs -text
46
- *.mp3 filter=lfs diff=lfs merge=lfs -text
47
- *.ogg filter=lfs diff=lfs merge=lfs -text
48
- *.wav filter=lfs diff=lfs merge=lfs -text
49
- # Image files - uncompressed
50
- *.bmp filter=lfs diff=lfs merge=lfs -text
51
- *.gif filter=lfs diff=lfs merge=lfs -text
52
- *.png filter=lfs diff=lfs merge=lfs -text
53
- *.tiff filter=lfs diff=lfs merge=lfs -text
54
- # Image files - compressed
55
- *.jpg filter=lfs diff=lfs merge=lfs -text
56
- *.jpeg filter=lfs diff=lfs merge=lfs -text
57
- *.webp filter=lfs diff=lfs merge=lfs -text
58
- # Video files - compressed
59
- *.mp4 filter=lfs diff=lfs merge=lfs -text
60
- *.webm filter=lfs diff=lfs merge=lfs -text
 
1
+ *.gguf filter=lfs diff=lfs merge=lfs -text
2
+ *.ggml filter=lfs diff=lfs merge=lfs -text
3
+ *.ort filter=lfs diff=lfs merge=lfs -text
4
+ *.exe filter=lfs diff=lfs merge=lfs -text
5
+ *.dll filter=lfs diff=lfs merge=lfs -text
6
+ *.wav filter=lfs diff=lfs merge=lfs -text
7
+ *.ico filter=lfs diff=lfs merge=lfs -text
8
+ *.onnx filter=lfs diff=lfs merge=lfs -text
9
+ *.bin filter=lfs diff=lfs merge=lfs -text
10
+ engine/cpu_avx/espeak-ng-data/af_dict filter=lfs diff=lfs merge=lfs -text
11
+ engine/cpu_avx/espeak-ng-data/ar_dict filter=lfs diff=lfs merge=lfs -text
12
+ engine/cpu_avx/espeak-ng-data/cmn_dict filter=lfs diff=lfs merge=lfs -text
13
+ engine/cpu_avx/espeak-ng-data/da_dict filter=lfs diff=lfs merge=lfs -text
14
+ engine/cpu_avx/espeak-ng-data/en_dict filter=lfs diff=lfs merge=lfs -text
15
+ engine/cpu_avx/espeak-ng-data/fa_dict filter=lfs diff=lfs merge=lfs -text
16
+ engine/cpu_avx/espeak-ng-data/hu_dict filter=lfs diff=lfs merge=lfs -text
17
+ engine/cpu_avx/espeak-ng-data/ia_dict filter=lfs diff=lfs merge=lfs -text
18
+ engine/cpu_avx/espeak-ng-data/it_dict filter=lfs diff=lfs merge=lfs -text
19
+ engine/cpu_avx/espeak-ng-data/lb_dict filter=lfs diff=lfs merge=lfs -text
20
+ engine/cpu_avx/espeak-ng-data/phondata filter=lfs diff=lfs merge=lfs -text
21
+ engine/cpu_avx/espeak-ng-data/ru_dict filter=lfs diff=lfs merge=lfs -text
22
+ engine/cpu_avx/espeak-ng-data/ta_dict filter=lfs diff=lfs merge=lfs -text
23
+ engine/cpu_avx/espeak-ng-data/ur_dict filter=lfs diff=lfs merge=lfs -text
24
+ engine/cpu_avx/espeak-ng-data/yue_dict filter=lfs diff=lfs merge=lfs -text
25
+ engine/cpu_avx2/espeak-ng-data/af_dict filter=lfs diff=lfs merge=lfs -text
26
+ engine/cpu_avx2/espeak-ng-data/ar_dict filter=lfs diff=lfs merge=lfs -text
27
+ engine/cpu_avx2/espeak-ng-data/cmn_dict filter=lfs diff=lfs merge=lfs -text
28
+ engine/cpu_avx2/espeak-ng-data/da_dict filter=lfs diff=lfs merge=lfs -text
29
+ engine/cpu_avx2/espeak-ng-data/en_dict filter=lfs diff=lfs merge=lfs -text
30
+ engine/cpu_avx2/espeak-ng-data/fa_dict filter=lfs diff=lfs merge=lfs -text
31
+ engine/cpu_avx2/espeak-ng-data/hu_dict filter=lfs diff=lfs merge=lfs -text
32
+ engine/cpu_avx2/espeak-ng-data/ia_dict filter=lfs diff=lfs merge=lfs -text
33
+ engine/cpu_avx2/espeak-ng-data/it_dict filter=lfs diff=lfs merge=lfs -text
34
+ engine/cpu_avx2/espeak-ng-data/lb_dict filter=lfs diff=lfs merge=lfs -text
35
+ engine/cpu_avx2/espeak-ng-data/phondata filter=lfs diff=lfs merge=lfs -text
36
+ engine/cpu_avx2/espeak-ng-data/ru_dict filter=lfs diff=lfs merge=lfs -text
37
+ engine/cpu_avx2/espeak-ng-data/ta_dict filter=lfs diff=lfs merge=lfs -text
38
+ engine/cpu_avx2/espeak-ng-data/ur_dict filter=lfs diff=lfs merge=lfs -text
39
+ engine/cpu_avx2/espeak-ng-data/yue_dict filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
.hintrc ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "extends": [
3
+ "development"
4
+ ],
5
+ "hints": {
6
+ "compat-api/css": [
7
+ "default",
8
+ {
9
+ "ignore": [
10
+ "backdrop-filter"
11
+ ]
12
+ }
13
+ ]
14
+ }
15
+ }
README.md ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # RemiAI Open Source Framework
2
+
3
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
4
+ [![Build: Electron](https://img.shields.io/badge/Build-Electron-blue.svg)](https://www.electronjs.org/)
5
+ [![Model: GGUF](https://img.shields.io/badge/Model-GGUF-green.svg)](https://huggingface.co/)
6
+ [![TTS: Piper](https://img.shields.io/badge/TTS-Piper-purple.svg)](https://github.com/rhasspy/piper)
7
+ [![STT: Whisper](https://img.shields.io/badge/STT-Whisper-orange.svg)](https://github.com/ggerganov/whisper.cpp)
8
+
9
+ **A "No-Setup" Local AI Framework for Students**
10
+
11
+ This project is an open-source, offline AI application designed for students and colleges. It allows you to run powerful LLMs (like Llama 3, Mistral, etc.) on your laptop without needing GPU, internet, Python, or complicated installations. It now includes **Text-to-Speech (TTS)** using Piper and **Speech-to-Text (STT)** using Whisper — all running 100% offline.
12
+
13
+ **Note** - No GPU is needed in your laptop: the application uses your laptop's CPU for response generation (inference). If you want to modify the project code and use another model, make sure you are using `.gguf`-formatted weights only. Normal weights like `.safetensors` or `.bin` (PyTorch) will NOT work.
14
+
15
+ **New in v2.1:**
16
+ * **Dynamic Resource Management**: To save CPU/RAM, the massive Text Generation model now automatically unloads when you switch to STT, TTS, or Web Browser tabs. It reloads when you return to Chat.
17
+ * **Debug Logging**: If issues arise in the packaged app, check the `app_debug.log` file created on your Desktop.
18
+ * **Manual Audio Conversion**: Enhanced STT stability by auto-converting audio formats before processing.
19
+ * **Known Issue**: Sometimes after switching back to Chat from other tabs, the status says "Connecting..." indefinitely. **Fix: Click the "Refresh App" button in the sidebar.**
20
+ ---
21
+
22
+ ## 🚀 Quick Start (One-Line Command)
23
+
24
+ If you have Git and Node.js installed, open your terminal (Command Prompt or PowerShell) and run:
25
+
26
+ `for powershell`
27
+ ```bash
28
+ git clone https://huggingface.co/remiai3/RemiAI_Framework
29
+ cd RemiAI_Framework
30
+ git lfs install
31
+ git lfs pull
32
+ npm install
33
+ npm start
34
+ ```
35
+
36
+ ### ⚠️ IMPORTANT: Git LFS Required
37
+ This repository uses **Git Large File Storage (LFS)** for the AI engine binaries.
38
+ **If you download the ZIP or clone without LFS, the app will not work (Error: "RemiAI engine missing").**
39
+
40
+ ---
41
+
42
+ ## 💻 Manual Installation
43
+
44
+ ### 1. Requirements
45
+ * **Node.js**: [Download Here](https://nodejs.org/) (Install the LTS version).
46
+ * **Git & Git LFS**: [Download Git](https://git-scm.com/) | [Download Git LFS](https://git-lfs.com/)
47
+ * **Windows Laptop**: (Code includes optimized `.exe` binaries for Windows).
48
+
49
+ ### 2. Download & Setup
50
+ 1. **Download** the project zip (or clone the repo).
51
+ 2. **Extract** the folder.
52
+ 3. **Open Terminal** inside the folder path.
53
+ 4. **Pull Engine Files** (Critical Step):
54
+ ```bash
55
+ git lfs install
56
+ git lfs pull
57
+ ```
58
+ 5. Run the installer for libraries:
59
+ ```bash
60
+ npm install
61
+ ```
62
+
63
+ ### 3. Run the App
64
+ Simply type:
65
+ ```bash
66
+ npm start
67
+ ```
68
+ The application will launch, the AI engine will start in the background, and you can begin chatting immediately!
69
+
70
+ ---
71
+
72
+ ## 📦 Features
73
+
74
+ * **💬 AI Chat (Text Generation)**: Chat with powerful LLMs running locally on your CPU.
75
+ * **Zero Python Dependency**: We use compiled binaries (`.dll` and `.exe` included) so you don't need to install Python, PyTorch, or set up virtual environments.
76
+ * **Plug & Play Models**: Supports `.gguf` format.
77
+ * Want a different model? Download any `.gguf` file, rename it to `model.gguf`, and place it in the project root.
78
+ * **Auto-Optimization**: Automatically detects your CPU features (AVX vs AVX2) to give you the best speed possible.
79
+ * **Privacy First**: Runs 100% offline. No data leaves your device.
80
+ * **Dynamic Resource Loading**: Automatically unloads heavy AI models when not in use (e.g., when using Browser or TTS) to free up system resources.
81
+ * **🔊 Text-to-Speech (TTS)**: Convert any text to natural-sounding English speech using the **Piper** engine.
82
+ * Click the speaker icon in the sidebar → type text → click "Speak" → listen and download `.wav` files.
83
+ * Voice model: `en_US-lessac-medium.onnx` (replaceable with other Piper voices).
84
+ * **🎙️ Speech-to-Text (STT)**: Extract text from audio files using the **Whisper** engine.
85
+ * Click the microphone icon in the sidebar → browse for audio file → click "Transcribe" → copy result text.
86
+ * Supports: `.wav`, `.mp3`, `.m4a`, `.ogg`, `.flac` formats.
87
+ * Requires `ffmpeg.exe` and `ffmpeg.dll` in the `bin/` folder.
88
+ * **🌐 Built-in Web Browser**: Integrated browser with tabs, bookmarks, and navigation.
89
+ * **🎨 Offline UI**: All icons (Lucide) and libraries (Marked.js) are bundled locally — no CDN required.
90
+
91
+ ## ⚠️ Capabilities & Limitations
92
+
93
+ * **Supported Models**: Exclusively supports `.gguf` format models (running via `llama.cpp` backend). Does NOT support `.safetensors`, `.pth`, or Python-based models directly.
94
+ * **Disk Space**: The final packaged application size depends heavily on the model you include.
95
+ * Base App (Electron + Engines): ~300MB
96
+ * + Gemma-2b Model: ~2GB
97
+ * **Total Installer Size**: ~2.5GB (Packaged).
98
+ * **Memory Usage**: Requires at least 4GB RAM free. The app now dynamically manages memory by unloading the chat model when using other tools.
99
+ * **Startup Time**: The Chat model may take 5-10 seconds to reload when switching back from other tabs. If it gets stuck, use the "Refresh App" button.
100
+
101
+ ---
102
+
103
+ ## 📂 Project Structure
104
+
105
+ ```text
106
+ Root/
107
+ ├── engine/ # AI Backend Engines
108
+ │ ├── cpu_avx/ # Fallback binaries (AVX)
109
+ │ │ ├── bujji_engine.exe # LLM inference server
110
+ │ │ ├── piper.exe # TTS engine
111
+ │ │ └── whisper.exe # STT server
112
+ │ ├── cpu_avx2/ # High-performance binaries (AVX2)
113
+ │ │ ├── bujji_engine.exe
114
+ │ │ ├── piper.exe
115
+ │ │ └── whisper.exe
116
+ │ ├── piper/ # TTS model
117
+ │ │ └── en_US-lessac-medium.onnx
118
+ │ └── whisper/ # STT model
119
+ │ └── ggml-base.en.bin
120
+ ├── bin/ # Utility binaries
121
+ │ ├── ffmpeg.exe # Audio conversion (required for STT)
122
+ │ ├── ffmpeg.dll # FFmpeg library
123
+ │ └── ffplay.exe # Audio playback
124
+ ├── assets/icons/ # Local SVG icons
125
+ ├── model.gguf # The AI Model
126
+ ├── main.js # Core Logic (Main Process)
127
+ ├── index.html # UI Layer
128
+ ├── renderer.js # Frontend Logic
129
+ ├── styles.css # Styling
130
+ ├── web.html # Built-in Web Browser
131
+ └── package.json # Dependencies
132
+ ```
133
+
134
+ ## ❓ Troubleshooting
135
+
136
+ **Error: "RemiAI Engine Missing"**
137
+ This means you downloaded the "pointer" files (130 bytes) instead of the real engine.
138
+ **Fix**:
139
+ 1. Open terminal in project folder.
140
+ 2. Run `git lfs install`
141
+ 3. Run `git lfs pull`
142
+ 4. Restart the app.
143
+
144
+ **Error: "Piper TTS executable not found" or "Piper TTS model not found"**
145
+ * Ensure `piper.exe` is in `engine/cpu_avx2/` (or `engine/cpu_avx/`).
146
+ * Ensure `en_US-lessac-medium.onnx` is in `engine/piper/`.
147
+ * Run `git lfs pull` to download all engine binaries.
148
+
149
+ **Error: "Whisper server failed to start"**
150
+ * Ensure `whisper.exe` is in `engine/cpu_avx2/` (or `engine/cpu_avx/`).
151
+ * **Critical**: Ensure `ffmpeg.exe` and `ffmpeg.dll` are in the `bin/` folder. The Whisper server requires FFmpeg.
152
+ * Run `git lfs pull` to download all engine binaries.
153
+
154
+ **Error: "No speech detected"**
155
+ * Ensure your audio file contains clear English speech.
156
+ * Try with a `.wav` file first for best results.
157
+
158
+ ---
159
+
160
+ ## 🛠️ Credits & License
161
+
162
+ * **Created By**: RemiAI Team
163
+ * **License**: MIT License.
164
+ * *You are free to rename, modify, and distribute this application as your own project!*
165
+
166
+ **Note on Models**: The application uses `.gguf`-formatted weights only, because they are CPU-friendly and allow the application to run without any GPU.
167
+ ---
168
+
assets/icons/globe.svg ADDED
assets/icons/message-circle.svg ADDED
bin/ffmpeg.exe ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e4b93a3f40121432e5350928a8012b2339575eb03666795b348888e1b6ab444
3
+ size 92387840
bin/ffplay.exe ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e73f33d7227aec79dd58f2f5ab4d2c78fc48e35da43519641a7d30359efe7fd
3
+ size 219133440
document.md ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Student & Developer Documentation
2
+
3
+ ## Overview
4
+ Welcome to the RemiAI Framework! This document is designed to help you understand how to customize, configure, and make this application your own. This framework is built to be "Plug-and-Play"—meaning you don't need to know Python or complex AI coding to use it. It includes **Text Generation** (chat with AI), **Text-to-Speech** (TTS — convert text to voice), and **Speech-to-Text** (STT — extract text from audio files), all running 100% offline.
5
+
6
+ ## 🛠️ Setup & How to Customize
7
+
8
+ ### 0. Quick Setup (Important!)
9
+ Before running the app, you **must** ensure the AI engine files are downloaded correctly. GitHub does not store large files directly, so we use **Git LFS**.
10
+
11
+ 1. **Install Git LFS**:
12
+ * Download and install from [git-lfs.com](https://git-lfs.com).
13
+ * Open a terminal and run: `git lfs install`
14
+ 2. **Pull Files**:
15
+ * Run: `git lfs pull` inside the project folder.
16
+ * *Why?* Without this, the app will say **"RemiAI Engine Missing"** or "Connection Refused".
17
+
18
+ ### 1. Changing the AI Name
19
+ Want to name the AI "Jarvis" or "MyBot"?
20
+ 1. Open `index.html` in any text editor (VS Code, Notepad, etc.).
21
+ 2. Search for "RemiAI" or "Bujji".
22
+ 3. Replace the text with your desired name.
23
+ 4. Save the file.
24
+ 5. Restart the app (`npm start`), and your new name will appear!
25
+
26
+ ### 2. Replacing the AI Model
27
+ This application is powered by a **GGUF** model file. You can swap this "brain" for a smarter one, a faster one, or one specialized in coding/storytelling.
28
+
29
+ **Steps to Change the Model:**
30
+ 1. **Download a Model**: Go to [Hugging Face](https://huggingface.co/models?library=gguf) and search for GGUF models (e.g., `Llama-3-8B-GGUF`, `Mistral-7B-GGUF`).
31
+ 2. **Select File**: Download the `.gguf` file (Q4_K_M or Q5_K_M are good balances of speed and intelligence).
32
+ 3. **Rename**: Rename your downloaded file to exactly:
33
+ > **`model.gguf`**
34
+ 4. **Replace**:
35
+ * Go to the `engine` folder in your project directory.
36
+ * Paste your new `model.gguf` there, replacing the old one (or place it one level up depending on your specific folder setup—check `main.js` which looks for `../model.gguf` relative to the engine binary). *Note: Standard setup is usually placing `model.gguf` in the root or `engine` folder as configured.*
37
+ 5. **Restart**: Run `npm start`. The app will now use the new intelligence!
38
+
39
+ **Note**: Make sure your laptop is in good health. Avoid using a laptop more than 5 years old, because running a full generative-AI model (an entire neural network) can strain the hardware — the laptop may freeze, overheat, or shut down automatically, and in extreme cases the device could be permanently damaged, so be careful.
40
+ **GOOD CONFIGURATION NO DAMAGE** (i3 processor, 8GB RAM) - if you are using the PC.
41
+ (i5 processor, 16GB RAM) - if you are using the laptop.
42
+ A new i3 laptop with 8GB RAM will run it easily, but the laptop needs to be fairly new and in good condition; if the laptop is too old it may not work well, even with an i5 processor and 16GB RAM.
43
+
44
+ ### 3. Customizing the UI
45
+ All styles are in `styles.css` (or within `index.html`).
46
+ * **Colors**: Change the background colors or chat bubble colors in the CSS.
47
+ * **Icons**: Replace `remiai.ico` with your own `.ico` file to change the app icon.
48
+
49
+ ### 4. Using Text-to-Speech (TTS)
50
+ The TTS feature converts typed text into natural-sounding English speech using the **Piper** engine.
51
+
52
+ **How to Use:**
53
+ 1. Click the **🔊 Speaker icon** in the sidebar.
54
+ 2. Type the text you want to hear in the text area.
55
+ 3. Click **"Speak"** — the audio will generate and play automatically.
56
+ 4. Click **"Download Audio"** to save the `.wav` file to your preferred location (a native Save dialog will appear).
57
+
58
+ **Customization:**
59
+ * The TTS voice model is stored at `engine/piper/en_US-lessac-medium.onnx`.
60
+ * You can replace it with other Piper ONNX voice models from [Piper Voices](https://github.com/rhasspy/piper/blob/master/VOICES.md).
61
+ * Download a new `.onnx` model + its `.json` config file and place them in `engine/piper/`.
62
+
63
+ ### 5. Using Speech-to-Text (STT)
64
+ The STT feature extracts text from audio files using the **Whisper** engine (runs as a local server).
65
+
66
+ **How to Use:**
67
+ 1. Click the **🎙️ Microphone icon** in the sidebar.
68
+ 2. Click **"Browse Audio File"** to select your audio file.
69
+ 3. Supported formats: `.wav`, `.mp3`, `.m4a`, `.ogg`, `.flac`.
70
+ 4. Click **"Transcribe"** — wait for processing (10-30 seconds depending on file length).
71
+ 5. The transcribed text will appear below. Click **"Copy"** to copy it to your clipboard.
72
+
73
+ **Requirements:**
74
+ * `ffmpeg.exe` and `ffmpeg.dll` must be present in the `bin/` folder for audio format conversion.
75
+ * If missing, download FFmpeg from [ffmpeg.org](https://ffmpeg.org/download.html) and place the files in `bin/`.
76
+
77
+ ### 6. Dynamic Resource Management (New!)
78
+ To ensure the application runs smoothly even on lower-end devices, we implemented a dynamic resource management system.
79
+ * **Behavior**: When you are in the **Chat** tab, the heavy AI model (Text Generation) is loaded into RAM.
80
+ * **Optimization**: When you switch to **TTS**, **STT**, or **Web Browser** tabs, the main AI model is **automatically unloaded/stopped**. This frees up to 2GB+ of RAM and significant CPU usage, allowing the TTS/STT engines to run faster and the browser to be more responsive.
81
+ * **Reloading**: When you switch back to the **Chat** tab, the model automatically restarts.
82
+ * *Note: You might see "Connecting..." for a few seconds. If it stays stuck, click the "Refresh App" button.*
83
+
84
+ ### 7. Offline Dependencies
85
+ All libraries are bundled locally — **no internet needed** after initial setup:
86
+ * **Lucide Icons**: Loaded from `node_modules/lucide/` (not from CDN).
87
+ * **Marked.js**: Loaded from `node_modules/marked/` (not from CDN).
88
+ * If icons or markdown rendering is broken, simply run `npm install` to restore them.
89
+
90
+ ## ❓ Frequently Asked Questions (FAQ)
91
+
92
+ **Q: Do I need Python?**
93
+ A: **No.** The application comes with a pre-compiled engine (`bujji_engine.exe` / `llama-server.exe`) that runs the model directly.
94
+
95
+ **Q: Why does it say "AVX2"?**
96
+ A: AVX2 is a feature in modern CPUs that makes the AI run faster. The app automatically detects if you have it. If not, it switches to a slower but compatible mode (AVX).
97
+
98
+ **Q: The app opens but doesn't reply / "RemiAI Engine Missing" Error.**
99
+ A:
100
+ 1. **Git LFS Issue**: This usually means you downloaded "pointers" (tiny files) instead of the real engine. Open a terminal in the folder and run `git lfs pull`.
101
+ 2. **Model Issue**: Check if `model.gguf` exists in the `engine` folder.
102
+ 3. **Console Check**: Open Developer Tools (Ctrl+Shift+I) to see errors.
103
+
104
+ **Q: I see "Content Security Policy" warnings in the console.**
105
+ A: We have configured safeguards (`index.html` meta tags) to block malicious scripts. The CSP is set to only allow local resources (`'self'`) and the local API server (`127.0.0.1:5000`). All external CDN dependencies have been removed.
106
+
107
+ **Q: How do I build it into an .exe file?**
108
+ A: Run the command:
109
+ ```bash
110
+ npm run dist
111
+ ```
112
+ This will create an installer in the `release` folder that you can share with friends!
113
+
114
+ `If you are facing errors while building, open PowerShell as an administrator and run the above command — then it should work.`
115
+
116
+ **Q: TTS says "Piper TTS executable not found".**
117
+ A: Make sure `piper.exe` exists in `engine/cpu_avx2/` (or `engine/cpu_avx/`). Run `git lfs pull` to download all engine binaries.
118
+
119
+ **Q: STT says "Whisper server failed to start".**
120
+ A:
121
+ 1. Check that `whisper.exe` exists in `engine/cpu_avx2/` (or `engine/cpu_avx/`).
122
+ 2. Check that `ffmpeg.exe` and `ffmpeg.dll` are present in the `bin/` folder. The Whisper server needs FFmpeg for audio conversion.
123
+ 3. Run `git lfs pull` to ensure all files are fully downloaded.
124
+
125
+ **Q: STT says "No speech detected".**
126
+ A: Make sure your audio file contains clear English speech. Background noise or non-English audio may cause transcription failures. Try with a clear `.wav` recording first.
127
+
128
+ **Q: Can I use TTS and STT together?**
129
+ A: Yes! You can generate speech with TTS, save the `.wav` file, then upload it to STT to verify the transcription. They work independently and can be used simultaneously.
130
+
131
+ **Q: Does the app need internet to work?**
132
+ A: **No.** After the initial `npm install` and `git lfs pull` setup, the app runs 100% offline. All models, engines, icons, and libraries are bundled locally.
engine/cpu_avx/SDL2.dll ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de23db1694a3c7a4a735e7ecd3d214b2023cc2267922c6c35d30c7fc7370d677
3
+ size 2500096
engine/cpu_avx/api.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import sys
import os
import multiprocessing
# BUG FIX: stream_with_context was used in chat_stream() but never imported,
# causing a NameError on every /chat_stream request.
from flask import Flask, request, Response, stream_with_context
from waitress import serve
import json
import traceback

# --- 1. SETUP LOGGING ---
def log(msg):
    """Print a tagged, flushed message so the parent process can capture engine output."""
    print(f"[ENGINE] {msg}", flush=True)

# --- 2. PATH SETUP ---
# When frozen (e.g. PyInstaller), resources live next to the executable;
# otherwise they live next to this source file.
if getattr(sys, 'frozen', False):
    BASE_DIR = os.path.dirname(sys.executable)
else:
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))

MODEL_PATH = os.path.join(BASE_DIR, "model.gguf")
log(f"Base Directory: {BASE_DIR}")

app = Flask(__name__)

# --- 3. THE "MONKEY PATCH" (CRITICAL FIX) ---
# We intercept the library's attempt to set up native logging and stop it.
try:
    import llama_cpp

    # Dummy replacement that does NOTHING.
    # When Llama() runs, it calls this instead of the real C log setup.
    def dummy_log_set(callback, user_data):
        return

    llama_cpp.llama_log_set = dummy_log_set

    log("Successfully patched Llama logging.")
except Exception as e:
    log(f"Patch warning: {e}")

# --- 4. LOAD MODEL ---
llm = None
try:
    from llama_cpp import Llama

    # Use roughly half the logical cores to avoid starving the host system.
    total_cores = multiprocessing.cpu_count()
    safe_threads = max(1, int(total_cores * 0.5))

    if not os.path.exists(MODEL_PATH):
        log("CRITICAL ERROR: model.gguf is missing!")
    else:
        log("Loading Model...")
        llm = Llama(
            model_path=MODEL_PATH,
            n_ctx=4096,
            n_threads=safe_threads,
            n_gpu_layers=0,      # CPU-only inference
            verbose=False,
            chat_format="gemma",
            use_mmap=False
        )
        log("Model Loaded Successfully!")

except Exception as e:
    log(f"CRITICAL EXCEPTION during load: {e}")
    log(traceback.format_exc())

@app.route('/', methods=['GET'])
def health_check():
    """Liveness probe: 200 once the model is loaded, 500 otherwise."""
    if llm: return "OK", 200
    return "MODEL_FAILED", 500

@app.route('/chat_stream', methods=['POST'])
def chat_stream():
    """Stream chat-completion tokens to the client as server-sent events (SSE)."""
    if not llm:
        return Response("data: " + json.dumps({'chunk': "Error: Brain failed initialization."}) + "\n\n", mimetype='text/event-stream')

    data = request.json
    messages = [{"role": "user", "content": data.get('message', '')}]

    def generate():
        # Yield one SSE "data:" frame per generated token chunk.
        try:
            stream = llm.create_chat_completion(messages=messages, max_tokens=1000, stream=True)
            for chunk in stream:
                if 'content' in chunk['choices'][0]['delta']:
                    yield f"data: {json.dumps({'chunk': chunk['choices'][0]['delta']['content']})}\n\n"
        except Exception as e:
            log(f"Gen Error: {e}")
            yield f"data: {json.dumps({'chunk': ' Error.'})}\n\n"

    # stream_with_context keeps the Flask request context alive while streaming.
    return Response(stream_with_context(generate()), mimetype='text/event-stream')

if __name__ == '__main__':
    log("Starting Waitress Server on Port 5000...")
    try:
        serve(app, host='127.0.0.1', port=5000, threads=6)
    except Exception as e:
        log(f"Server Crash: {e}")
engine/cpu_avx/bujji_engine.exe ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d7218b54eef84bed96f12d866a6ae451a761fdb609d7b9965fea16b497899d2
3
+ size 3239936
engine/cpu_avx/espeak-ng-data/af_dict ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25729d3bf4c4a0f08da60aea9eb5a0cf352630f83fab1ab0c3955b7740da1776
3
+ size 121473
engine/cpu_avx/espeak-ng-data/am_dict ADDED
Binary file (63.9 kB). View file
 
engine/cpu_avx/espeak-ng-data/an_dict ADDED
Binary file (6.69 kB). View file
 
engine/cpu_avx/espeak-ng-data/ar_dict ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:72316426e797777fe4df9420935a3b6a79b37d7e3f3948537ba71cd7b21b2541
3
+ size 478165
engine/cpu_avx/espeak-ng-data/as_dict ADDED
Binary file (5.01 kB). View file
 
engine/cpu_avx/espeak-ng-data/az_dict ADDED
Binary file (43.8 kB). View file
 
engine/cpu_avx/espeak-ng-data/ba_dict ADDED
Binary file (2.1 kB). View file
 
engine/cpu_avx/espeak-ng-data/be_dict ADDED
Binary file (2.65 kB). View file
 
engine/cpu_avx/espeak-ng-data/bg_dict ADDED
Binary file (87.1 kB). View file
 
engine/cpu_avx/espeak-ng-data/bn_dict ADDED
Binary file (90 kB). View file
 
engine/cpu_avx/espeak-ng-data/bpy_dict ADDED
Binary file (5.23 kB). View file
 
engine/cpu_avx/espeak-ng-data/bs_dict ADDED
Binary file (47.1 kB). View file
 
engine/cpu_avx/espeak-ng-data/ca_dict ADDED
Binary file (45.6 kB). View file
 
engine/cpu_avx/espeak-ng-data/chr_dict ADDED
Binary file (2.86 kB). View file
 
engine/cpu_avx/espeak-ng-data/cmn_dict ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:109aaa7708d3727382acb3ae41d8e2094a7e2bb9f651a81835be22a6f08071fe
3
+ size 1566335
engine/cpu_avx/espeak-ng-data/cs_dict ADDED
Binary file (49.6 kB). View file
 
engine/cpu_avx/espeak-ng-data/cv_dict ADDED
Binary file (1.34 kB). View file
 
engine/cpu_avx/espeak-ng-data/cy_dict ADDED
Binary file (43.1 kB). View file
 
engine/cpu_avx/espeak-ng-data/da_dict ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae6c2dd4f0f4d38342918a776a9e2c46d919572f02336c864f843ff7b262caf8
3
+ size 245287
engine/cpu_avx/espeak-ng-data/de_dict ADDED
Binary file (68.3 kB). View file
 
engine/cpu_avx/espeak-ng-data/el_dict ADDED
Binary file (72.8 kB). View file
 
engine/cpu_avx/espeak-ng-data/en_dict ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:71bd330ba8a2e3e8076e631508208ef49449d6147c17b7bd2b4b1e1468292e35
3
+ size 166944
engine/cpu_avx/espeak-ng-data/eo_dict ADDED
Binary file (4.67 kB). View file
 
engine/cpu_avx/espeak-ng-data/es_dict ADDED
Binary file (49.3 kB). View file
 
engine/cpu_avx/espeak-ng-data/et_dict ADDED
Binary file (44.3 kB). View file
 
engine/cpu_avx/espeak-ng-data/eu_dict ADDED
Binary file (48.8 kB). View file
 
engine/cpu_avx/espeak-ng-data/fa_dict ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f92f35ce7eb1d97360016617d97786412a94af55b776a18f1c280dab4f43befd
3
+ size 292423
engine/cpu_avx/espeak-ng-data/fi_dict ADDED
Binary file (43.9 kB). View file
 
engine/cpu_avx/espeak-ng-data/fr_dict ADDED
Binary file (63.7 kB). View file
 
engine/cpu_avx/espeak-ng-data/ga_dict ADDED
Binary file (52.7 kB). View file
 
engine/cpu_avx/espeak-ng-data/gd_dict ADDED
Binary file (49.1 kB). View file
 
engine/cpu_avx/espeak-ng-data/gn_dict ADDED
Binary file (3.25 kB). View file
 
engine/cpu_avx/espeak-ng-data/grc_dict ADDED
Binary file (3.43 kB). View file
 
engine/cpu_avx/espeak-ng-data/gu_dict ADDED
Binary file (82.5 kB). View file
 
engine/cpu_avx/espeak-ng-data/hak_dict ADDED
Binary file (3.34 kB). View file
 
engine/cpu_avx/espeak-ng-data/haw_dict ADDED
Binary file (2.44 kB). View file
 
engine/cpu_avx/espeak-ng-data/he_dict ADDED
Binary file (6.96 kB). View file
 
engine/cpu_avx/espeak-ng-data/hi_dict ADDED
Binary file (92.1 kB). View file
 
engine/cpu_avx/espeak-ng-data/hr_dict ADDED
Binary file (49.4 kB). View file