// groqnote/public/controller.js
// (restored from file-listing export: author trretretret, commit 26ab438 "basic")
import { appModel } from './model.js';
import { utils } from './utils.js';
import { view } from './view.js';
const controller = {
  /**
   * Transcribe an audio blob with the primary STT service, falling back to
   * `whisperjaxws` when the primary returns no usable text (utils.stt may
   * return `false` or an object without `.text` on failure — see callers).
   * @param {Blob} audioBlob - recorded audio to transcribe
   * @returns {Promise<{text: string}|*>} result object; expected shape { text }
   *   (whisperjaxws is assumed to return the same shape — TODO confirm)
   */
  async transcribeWithFallback(audioBlob) {
    const transcribe = await utils.stt(audioBlob);
    // Optional chaining guards against `false`/null returns, which previously
    // threw a TypeError on `!transcribe.text`.
    if (transcribe?.text) {
      return transcribe;
    }
    console.log("transcribe failed, try alternative way");
    return whisperjaxws(audioBlob); // Replace with your alternative transcription
  },

  /**
   * Record once via the voice button, transcribe the result, and type it at
   * the currently focused element. Recordings shorter than
   * `appModel.minimalRecordTime` are rejected with a toast.
   * @param {Event} event - triggering UI event (unused, kept for callers)
   */
  async handleRecording(event) {
    const startTime = Date.now();
    const audioBlob = await view.recorder.startRecording(view.elem.voiceButton);
    if (Date.now() - startTime < appModel.minimalRecordTime) {
      utils.showToast("time too short, this will not transcribe");
      return;
    }
    const transcribe = await controller.transcribeWithFallback(audioBlob);
    utils.writeText(document.activeElement, transcribe?.text);
  },

  /**
   * Record with silence detection: each detected speech chunk is transcribed
   * and typed at the caret as it arrives; the final combined blob is sent to
   * the Lepton Whisper API once recording ends.
   * @param {Event} event - triggering UI event (unused, kept for callers)
   */
  async startRecordingWithSilenceDetection(event) {
    const startTime = Date.now();
    const finalAudioBlob = await view.recorder.startRecordingWithSilenceDetection(
      view.elem.voiceButton,
      async (audioBlob) => {
        // Per-chunk streaming path; awaited here instead of a floating .then
        // chain so errors surface through the callback's promise.
        const chunk = await controller.transcribeWithFallback(audioBlob);
        // Write .text for consistency with every other call site (the old
        // fallback branch wrote the raw result object).
        utils.writeText(document.activeElement, chunk?.text);
      }
    );
    if (Date.now() - startTime < appModel.minimalRecordTime) {
      utils.showToast("time too short, this will not transcribe");
      return;
    }
    // Final pass uses the Lepton Whisper API rather than utils.stt —
    // presumably intentional for the full recording; verify against utils.
    let transcribe = await utils.sendAudioToLeptonWhisperApi(finalAudioBlob);
    if (!transcribe?.text) {
      console.log("transcribe failed, try alternative way");
      transcribe = await whisperjaxws(finalAudioBlob); // Replace with your alternative transcription
    }
    utils.writeText(document.activeElement, transcribe?.text);
  },

  /**
   * Stop the active recording and clear the recording flag.
   * @param {boolean} [safeStop=true] - when true, delay the stop by 500 ms so
   *   trailing audio is captured before the recorder shuts down.
   */
  stopRecording(safeStop = true) {
    appModel.isRecording = false;
    if (safeStop) {
      setTimeout(() => {
        console.log("safeStop");
        view.recorder.stopRecording();
      }, 500);
    } else {
      view.recorder.stopRecording();
    }
  },

  /**
   * Send `message` plus context (the current selection, or the caret's line
   * when nothing is selected) to the LLM and render the reply as markdown.
   * @param {string} message - instruction to prepend to the context
   */
  async chat(message) {
    const selectText = window.getSelection().toString();
    const currentLineString = utils.getCurrentLineString(document.activeElement);
    // Local const: the original assigned to bare `prompt`, which in a strict
    // ES module throws (and otherwise clobbers window.prompt).
    const userText = `${message} ${selectText.length >= 1 ? selectText : currentLineString} `;
    if (!utils.checkValidString(userText)) {
      console.log("chat(): invalid userText:", userText);
      return;
    }
    utils.displayMarkdown(userText + " \n please wait");
    utils.AIComplete(userText, appModel.llm_model_info); // Replace with your LLM API call
  },

  /**
   * Ask the LLM a spoken question. If already recording, immediately send the
   * current selection instead. Otherwise record, transcribe (with fallback),
   * prefix the quoted selection when present, and send to the LLM.
   */
  async ask() {
    if (appModel.isRecording) {
      utils.AIComplete(utils.getSelectionText(), appModel.llm_model_info);
      return;
    }
    const startTime = Date.now();
    const audioBlob = await view.recorder.startRecording(view.elem.voiceButton);
    if (Date.now() - startTime < appModel.minimalRecordTime) {
      utils.showToast("time too short, this will not transcribe");
      console.log("ask():", utils.getSelectionText());
      // Too short to transcribe — fall back to asking about the selection.
      utils.AIComplete(utils.getSelectionText(), appModel.llm_model_info);
      return;
    }
    const transcribe = await controller.transcribeWithFallback(audioBlob);
    const selectionString = window.getSelection().toString();
    const userText = utils.checkValidString(selectionString)
      ? `"${selectionString}" ${transcribe?.text}`
      : transcribe?.text;
    if (!utils.checkValidString(userText)) {
      console.log("ask(): invalid userText:", userText);
      return;
    }
    utils.displayMarkdown(userText + " please wait");
    // Pass the model info for consistency with chat()/the isRecording branch.
    utils.AIComplete(userText, appModel.llm_model_info); // Replace with your LLM API call
  },
};
// TODO: define or import `whisperjaxws` (the fallback transcription API used
// above) and any other external APIs before shipping — it is currently unresolved.
export { controller };