import { appModel } from './model.js';
import { utils } from './utils.js';
import { view } from './view.js';
const controller = {
  /**
   * One-shot voice input: record until the recorder resolves, transcribe the
   * audio, and insert the text into the currently focused element.
   * Recordings shorter than appModel.minimalRecordTime are discarded with a
   * toast and nothing is transcribed.
   * @param {Event} [event] - DOM event from the button listener; unused.
   */
  async handleRecording(event) {
    const startTime = Date.now();
    const audioBlob = await view.recorder.startRecording(view.elem.voiceButton);
    if (Date.now() - startTime < appModel.minimalRecordTime) {
      utils.showToast("time too short, this will not transcribe");
      return;
    }
    let transcribe = await utils.stt(audioBlob);
    // `?.` guards a null/undefined STT response as well as empty text.
    if (!transcribe?.text) {
      console.log("transcribe failed, try alternative way");
      transcribe = await whisperjaxws(audioBlob); // Replace with your alternative transcription
    }
    utils.writeText(document.activeElement, transcribe.text);
  },

  /**
   * Continuous voice input: the recorder calls the chunk callback at each
   * detected silence (each chunk is transcribed and written incrementally),
   * then resolves with the full recording, which is transcribed once more.
   * @param {Event} [event] - DOM event from the button listener; unused.
   */
  async startRecordingWithSilenceDetection(event) {
    const startTime = Date.now();
    const finalAudioBlob = await view.recorder.startRecordingWithSilenceDetection(
      view.elem.voiceButton,
      (audioBlob) => {
        utils
          .stt(audioBlob)
          .then((transcribe) => {
            if (!transcribe?.text) {
              console.log("transcribe failed, try alternative way");
              // BUG FIX: the fallback previously wrote the whole response
              // object; every other call site writes its `.text` field.
              return whisperjaxws(audioBlob).then((alt) => { // Replace with your alternative transcription
                utils.writeText(document.activeElement, alt.text);
              });
            }
            utils.writeText(document.activeElement, transcribe.text);
          })
          // Chunk transcription is fire-and-forget; log instead of leaving
          // an unhandled rejection.
          .catch((err) => console.log("chunk transcription failed:", err));
      }
    );
    if (Date.now() - startTime < appModel.minimalRecordTime) {
      utils.showToast("time too short, this will not transcribe");
      return;
    }
    let transcribe = await utils.sendAudioToLeptonWhisperApi(finalAudioBlob);
    // `!transcribe?.text` (not `!transcribe`) so an object response with
    // empty text also falls back — consistent with handleRecording.
    if (!transcribe?.text) {
      console.log("transcribe failed, try alternative way");
      transcribe = await whisperjaxws(finalAudioBlob); // Replace with your alternative transcription
    }
    utils.writeText(document.activeElement, transcribe.text);
  },

  /**
   * Stop the active recording and clear the recording flag.
   * @param {boolean} [safeStop=true] - when true, delay the recorder stop by
   *   500 ms so the tail of the utterance is not clipped.
   */
  stopRecording(safeStop = true) {
    appModel.isRecording = false;
    if (safeStop) {
      setTimeout(() => {
        console.log("safeStop");
        view.recorder.stopRecording();
      }, 500);
    } else {
      view.recorder.stopRecording();
    }
  },

  /**
   * Send `message` plus the current selection (or, when nothing is selected,
   * the focused element's current line) to the LLM.
   * @param {string} message - instruction prefix for the prompt.
   */
  async chat(message) {
    const selectText = window.getSelection().toString();
    const currentLineString = utils.getCurrentLineString(document.activeElement);
    // BUG FIX: `prompt` was assigned without any declaration, which throws a
    // ReferenceError in strict mode (ES modules are strict) and would
    // otherwise shadow window.prompt.
    const userText = `${message} ${selectText.length >= 1 ? selectText : currentLineString} `;
    if (!utils.checkValidString(userText)) {
      console.log("chat(): invalid userText:", userText);
      return;
    }
    utils.displayMarkdown(userText + " \n please wait");
    utils.AIComplete(userText, appModel.llm_model_info); // Replace with your LLM API call
  },

  /**
   * Voice-driven LLM query. If a recording is already in progress, the
   * current selection is sent immediately; otherwise record, transcribe,
   * prefix the quoted selection (when one exists), and send the result.
   */
  async ask() {
    if (appModel.isRecording) {
      utils.AIComplete(utils.getSelectionText(), appModel.llm_model_info);
      return;
    }
    const startTime = Date.now();
    const audioBlob = await view.recorder.startRecording(view.elem.voiceButton);
    if (Date.now() - startTime < appModel.minimalRecordTime) {
      utils.showToast("time too short, this will not transcribe");
      console.log("ask():", utils.getSelectionText());
      utils.AIComplete(utils.getSelectionText(), appModel.llm_model_info);
      return;
    }
    let transcribe = await utils.stt(audioBlob);
    // `?.text` (not a bare truthiness check) so an empty transcription also
    // triggers the fallback — consistent with the other methods.
    if (!transcribe?.text) {
      console.log("transcribe failed, try alternative way");
      transcribe = await whisperjaxws(audioBlob); // Replace with your alternative transcription
    }
    const selectionString = window.getSelection().toString();
    const userText = utils.checkValidString(selectionString)
      ? `"${selectionString}" ${transcribe.text}`
      : transcribe.text;
    if (!utils.checkValidString(userText)) {
      console.log("ask(): invalid userText:", userText);
      return;
    }
    utils.displayMarkdown(userText + " please wait");
    // CONSISTENCY FIX: pass the model info like every other AIComplete call.
    utils.AIComplete(userText, appModel.llm_model_info); // Replace with your LLM API call
  },
};
// ... (whisperjaxws function or other external APIs) ...
export { controller };