import { appModel } from './model.js';
import { utils } from './utils.js';
import { view } from './view.js';
| |
|
const controller = {
  /**
   * Push-to-talk: record while the voice button is held, transcribe the
   * result, and insert the text into the focused element.
   * @param {Event} event - UI event that triggered recording (unused here).
   */
  async handleRecording(event) {
    const startTime = Date.now();
    const audioBlob = await view.recorder.startRecording(view.elem.voiceButton);

    // Discard recordings shorter than the configured minimum.
    if (Date.now() - startTime < appModel.minimalRecordTime) {
      utils.showToast("time too short, this will not transcribe");
      return;
    }

    let transcribe = await utils.stt(audioBlob);
    // BUG FIX: the original read `transcribe.text` with no null guard, but
    // sibling call sites (ask, the Lepton path) treat a falsy result as
    // "transcription failed" — guard before dereferencing.
    if (!transcribe?.text) {
      console.log("transcribe failed, try alternative way");
      // NOTE(review): whisperjaxws is not imported in this module —
      // presumably a global fallback transcriber; confirm.
      transcribe = await whisperjaxws(audioBlob);
    }
    utils.writeText(document.activeElement, transcribe.text);
  },

  /**
   * Continuous recording that auto-segments on silence. Each segment is
   * transcribed and written into the focused element as it arrives, and a
   * final transcription pass runs on the complete recording.
   * @param {Event} event - UI event that triggered recording (unused here).
   */
  async startRecordingWithSilenceDetection(event) {
    const startTime = Date.now();
    const finalAudioBlob = await view.recorder.startRecordingWithSilenceDetection(
      view.elem.voiceButton,
      (audioBlob) => {
        utils
          .stt(audioBlob)
          .then(async (transcribe) => {
            if (!transcribe?.text) {
              console.log("transcribe failed, try alternative way");
              transcribe = await whisperjaxws(audioBlob);
            }
            // CONSISTENCY FIX: the original fallback path wrote the whole
            // `transcribe` object; every other call site writes `.text`.
            utils.writeText(document.activeElement, transcribe.text);
          })
          // The original left this promise floating with no rejection
          // handler; log instead of silently dropping the segment.
          .catch((err) => console.log("segment transcription failed:", err));
      }
    );

    // Discard recordings shorter than the configured minimum.
    if (Date.now() - startTime < appModel.minimalRecordTime) {
      utils.showToast("time too short, this will not transcribe");
      return;
    }

    let transcribe = await utils.sendAudioToLeptonWhisperApi(finalAudioBlob);
    if (!transcribe) {
      console.log("transcribe failed, try alternative way");
      transcribe = await whisperjaxws(finalAudioBlob);
    }
    utils.writeText(document.activeElement, transcribe.text);
  },

  /**
   * Stop the active recording.
   * @param {boolean} [safeStop=true] - When true, delay the stop by 500 ms so
   *   trailing audio is captured before the recorder shuts down.
   */
  stopRecording(safeStop = true) {
    appModel.isRecording = false;
    if (safeStop) {
      setTimeout(() => {
        console.log("safeStop");
        view.recorder.stopRecording();
      }, 500);
    } else {
      view.recorder.stopRecording();
    }
  },

  /**
   * Send `message` plus context — the current selection, or the caret's line
   * when nothing is selected — to the LLM for completion.
   * @param {string} message - Instruction to prepend to the context.
   */
  async chat(message) {
    const selectText = window.getSelection().toString();
    const currentLineString = utils.getCurrentLineString(document.activeElement);

    // BUG FIX: the original assigned the undeclared identifier `prompt`,
    // which clobbers the global window.prompt in a browser and throws a
    // ReferenceError in strict-mode (module) scope where no such global
    // exists. Use a local binding instead.
    const userText = `${message} ${selectText.length >= 1 ? selectText : currentLineString} `;

    if (!utils.checkValidString(userText)) {
      console.log("chat(): invalid userText:", userText);
      return;
    }

    utils.displayMarkdown(userText + " \n please wait");
    utils.AIComplete(userText, appModel.llm_model_info);
  },

  /**
   * Voice-driven ask: if a recording is already in progress, complete on the
   * current selection immediately; otherwise record, transcribe, and combine
   * the transcription with any selected text before completing.
   */
  async ask() {
    if (appModel.isRecording) {
      utils.AIComplete(utils.getSelectionText(), appModel.llm_model_info);
      return;
    }

    const startTime = Date.now();
    const audioBlob = await view.recorder.startRecording(view.elem.voiceButton);

    // Too-short recording: fall back to completing on the selection alone.
    if (Date.now() - startTime < appModel.minimalRecordTime) {
      utils.showToast("time too short, this will not transcribe");
      console.log("ask():", utils.getSelectionText());
      utils.AIComplete(utils.getSelectionText(), appModel.llm_model_info);
      return;
    }

    let transcribe = await utils.stt(audioBlob);
    if (!transcribe?.text) {
      console.log("transcribe failed, try alternative way");
      transcribe = await whisperjaxws(audioBlob);
    }
    const selectionString = window.getSelection().toString();
    const userText = utils.checkValidString(selectionString)
      ? `"${selectionString}" ${transcribe.text}`
      : transcribe.text;

    if (!utils.checkValidString(userText)) {
      console.log("ask(): invalid userText:", userText);
      return;
    }

    utils.displayMarkdown(userText + " please wait");
    // CONSISTENCY FIX: every other completion call passes the model info;
    // the original omitted it here.
    utils.AIComplete(userText, appModel.llm_model_info);
  },
};
| |
|

export { controller };