Commit 71728fa5 authored by ali

feat: rewrite llm websocket into llm loop post

parent 19dfc9af
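
Not part of the commit: a minimal sketch of the polling flow the two renderer pages switch to, inferred from the diff below. The endpoint paths (`/api/v1/generate`, `/api/v1/audio`, `/api/v1/interrupt`), the `status_code === 100` success check, the `audio_list` payload, the 100 ms poll delay, and the `'stream_end'` sentinel come from the new code; the helper names (`postJson`, `askOnce`) and the `LlmResult` type are illustrative only.

```ts
// Sketch only, not the committed code. Flow: POST generate, poll audio
// until the server appends 'stream_end', POST interrupt to cancel.
interface LlmResult {
  status_code: number
  text: string[]
  audio_list: string[] // last element is 'stream_end' once generation finishes
}

// Hypothetical helper; the real code inlines fetch() at each call site.
async function postJson<T>(base: string, path: string, body: unknown): Promise<T> {
  const res = await fetch(`${base}${path}`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    mode: 'cors',
    body: JSON.stringify(body)
  })
  return (await res.json()) as T
}

async function askOnce(base: string, question: string, onAudio: (paths: string[], isEnd: boolean) => void) {
  // Kick off generation; the diff treats status_code 100 as success.
  const gen = await postJson<{ results: LlmResult[] }>(base, '/api/v1/generate', { question })
  if (gen.results[0].status_code !== 100) throw new Error(JSON.stringify(gen.results))

  let consumed = 0
  for (;;) {
    await new Promise((r) => setTimeout(r, 100)) // poll interval used in the diff
    const { results } = await postJson<{ results: LlmResult[] }>(base, '/api/v1/audio', { question })
    const audioList = [...results[0].audio_list]
    const isEnd = audioList.at(-1) === 'stream_end'
    if (isEnd) audioList.pop()
    const fresh = audioList.slice(consumed) // only hand over segments not seen yet
    consumed += fresh.length
    if (fresh.length) onAudio(fresh, isEnd)
    if (isEnd) break
  }
}

// Cancelling an in-flight answer maps to the new llmEnd():
// await postJson(base, '/api/v1/interrupt', { end: 1 })
```

Each page then feeds the fresh audio paths into its existing playback path: enQueue on the photo page, the ttsAudios queue plus runAudioPlay on the video page.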
@@ -118,6 +118,8 @@ export default class IPCs {
   static initializeChildWindow(window: BrowserWindow) {
     ipcMain.on('fileUpload', async (event, path: string) => {
+      try {
         const content = IPCs.readFile(path)
         const formData = new FormData()
         const blob = new Blob([content], { type: 'audio/wav' })
@@ -127,8 +129,11 @@ export default class IPCs {
           method: 'POST',
           data: formData
         })
         window.webContents.send('msgReceivedFileUploadResponse', response)
+      } catch (error) {
+        window.webContents.send('msgReceivedFileUploadResponse', { code: 500, message: JSON.stringify(error) })
+      }
     })
   }
 }
@@ -26,7 +26,6 @@ const microphoneState = ref<'waitInput' | 'input' | 'loading' | 'disabled' | 're
 const videoElement = ref<HTMLVideoElement | null>(null)
 const can = ref<HTMLCanvasElement | null>(null)
 let photoRole: PhotoRole | null = null
-let flvPlayer: flvjs.Player | null = null
 const inputContext: {
   mediaStream?: MediaStream
   audioContext?: AudioContext
@@ -79,8 +78,6 @@ async function init() {
   photoRole = new PhotoRole(settings.liveHost, `${item?.liveUrl}`, canvasEle)
   photoRole.on('asyncAnswer', onAsyncAnswer)
-  // initPlayer(videoEle);
   try {
     await photoRole.init()
   } catch (error) {
@@ -113,78 +110,10 @@ async function onAsyncAnswer(ans: PhotoAnswer) {
   }
 }
-function draw(
-  ctx: CanvasRenderingContext2D,
-  img: HTMLImageElement,
-  liveVideo?: HTMLVideoElement,
-  videoInfo?: {
-    center: {
-      x: number
-      y: number
-    }
-    width: number
-    height: number
-    r_w: number
-    r_h: number
-  }
-) {
-  ctx.clearRect(0, 0, img.naturalWidth, img.naturalHeight)
-  ctx.drawImage(img, 0, 0, img.naturalWidth, img.naturalHeight)
-  if (liveVideo && videoInfo) {
-    const { center, r_w, r_h } = videoInfo
-    ctx.drawImage(liveVideo, center.x - r_w / 2, center.y - r_h / 2, r_w, r_h)
-  }
-}
-async function initPlayer(videoEle: HTMLVideoElement) {
-  flvPlayer = flvjs.createPlayer(
-    {
-      url: 'http://127.0.0.1:7001/live/movie.flv',
-      type: 'flv',
-      isLive: true,
-      cors: true
-    },
-    {
-      // enableWorker: true,
-      enableStashBuffer: false,
-      stashInitialSize: 128
-    }
-  )
-  flvPlayer.attachMediaElement(videoEle)
-  flvPlayer.load()
-  await flvPlayer.play()
-}
 router.beforeEach((g) => {
   if (!g.query.url) return router.push('/error')
 })
-async function initVosk({
-  result,
-  partialResult
-}: {
-  result?: (string) => void
-  partialResult?: (string) => void
-}) {
-  const channel = new MessageChannel()
-  const model = await settings.downLoadVoskModel()
-  const recognizer = new model.KaldiRecognizer(sampleRate)
-  model.registerPort(channel.port1)
-  recognizer.setWords(true)
-  recognizer.on('result', (message) => {
-    result && result((message as ServerMessageResult).result.text)
-  })
-  recognizer.on('partialresult', (message) => {
-    partialResult && partialResult((message as ServerMessagePartialResult).result.partial)
-  })
-  return { recognizer, channel }
-}
 function analyzeMicrophoneVolume(stream: MediaStream, callback: (number) => void) {
   const audioContext = new AudioContext()
   const analyser = audioContext.createAnalyser()
@@ -214,62 +143,6 @@ function analyzeMicrophoneVolume(stream: MediaStream, callback: (number) => void
   inputContext.scriptProcessorNode = recordEventNode
 }
-async function startVoskWasmAudioInput() {
-  if (microphoneState.value === 'loading') return
-  if (microphoneState.value === 'input') {
-    endAudioInput()
-    return
-  }
-  microphoneState.value = 'loading'
-  const { recognizer, channel } = await initVosk({
-    result: onQ,
-    partialResult: (text) => {
-      // console.log('----------------> partialResult:', text)
-    }
-  })
-  sampleRate = 48000
-  const mediaStream = await navigator.mediaDevices.getUserMedia({
-    video: false,
-    audio: {
-      echoCancellation: true,
-      noiseSuppression: true,
-      channelCount: 1,
-      sampleRate
-    }
-  })
-  const audioContext = new AudioContext()
-  await audioContext.audioWorklet.addModule(
-    new URL('/vosk/recognizer-processor.js', import.meta.url)
-  )
-  const recognizerProcessor = new AudioWorkletNode(audioContext, 'recognizer-processor', {
-    channelCount: 1,
-    numberOfInputs: 1,
-    numberOfOutputs: 1
-  })
-  recognizerProcessor.port.postMessage({ action: 'init', recognizerId: recognizer.id }, [
-    channel.port2
-  ])
-  recognizerProcessor.connect(audioContext.destination)
-  const source = audioContext.createMediaStreamSource(mediaStream)
-  source.connect(recognizerProcessor)
-  await analyzeMicrophoneVolume(mediaStream, (val) => {
-    recordVolume.value = val
-  })
-  microphoneState.value = 'input'
-  inputContext.mediaStream = mediaStream
-  inputContext.audioContext = audioContext
-}
 async function startVoskWsAudioInput() {
   if (microphoneState.value === 'loading') return
@@ -358,15 +231,21 @@ function initVoskWS() {
   })
 }
-function initLLMSocket(): Promise<WebSocket> {
-  const ws = new WebSocket(settings.llmUrl)
-  return new Promise((resolve, reject) => {
-    ws.onopen = () => resolve(ws)
-    ws.onerror = reject
-  })
-}
+async function llmEnd() {
+  const resp = (await (await fetch(`${settings.llmUrl}/api/v1/interrupt`, {
+    method: 'POST',
+    headers: {
+      'Content-Type': 'application/json',
+    },
+    body: JSON.stringify({ end: 1 }),
+    mode: 'cors'
+  })).json() );
+  console.log('---------------->', resp);
+}
 async function endAudioInput() {
+  await llmEnd()
   microphoneState.value = 'waitInput'
   inputContext.ws?.close()
   inputContext.mediaStream?.getTracks().forEach((track) => track.stop())
@@ -422,132 +301,93 @@ async function onQ(question: string) {
   microphoneState.value = 'loading'
-  try {
-    const ws = await initLLMSocket()
   const { pose, stepResolve, stepReject } = createStep()
-    const messageTimeout = setTimeout(async () => {
-      showError('llm:timeout!')
-      await endAudioInput()
-      microphoneState.value = 'waitInput'
-    }, 10000)
-    let sliceAnswer = ''
-    let answer = ''
-    let isTime = true
-    let sliceAnswerLength = 10
-    inputContext.ws = ws
-    inputContext.answerArray.length = 0
   inputContext.steps.length = 0
   inputContext.steps.push(pose)
-    photoRole!.answerArgs = new PhotoAnswer()
-    photoRole!.on('asyncAnswer', onAsyncAnswer)
-    ws.onmessage = (message) => {
-      clearTimeout(messageTimeout)
   try {
-        let { text, event } = JSON.parse(message.data) as {
-          event: string
-          message_num: number
-          text: string
-        }
-        if (event === 'stream_end') {
-          inputContext.answerArray.push({ text: sliceAnswer, isLast: true })
-          sliceAnswer = ''
-          runTTSTask()
-          inputContext.ws?.close()
-          console.log('----------------> answer: ', answer)
-          stepResolve('chat')
-          return
-        }
-        text = text.replace(/\u0000/g, '').trim()
-        answer += text
-        photoRole!.answerArgs!.answer += answer
-        photoRole!.answerArgs!._typingAnswer.push(answer)
-        isTime && console.time('sliceAnswer')
-        isTime = false
-        const textArr = text.split('')
-        for (let i = 0; i < textArr.length; i++) {
-          const t = textArr[i]
-          sliceAnswer += t
-          if (/[。,?!;,.?!;]/.test(t) && sliceAnswer.length >= sliceAnswerLength) {
-            console.timeEnd('sliceAnswer')
-            sliceAnswerLength = settings.llmToTTSSliceLength
-            inputContext.answerArray.push({ text: sliceAnswer, isLast: true })
-            runTTSTask()
-            sliceAnswer = ''
-            isTime = true
-          }
-        }
-      } catch (error) {
-        showError('llm:' + error)
-        endAudioInput().then(() => {
-          microphoneState.value = 'waitInput'
-        })
-        stepReject(JSON.stringify(error))
-      }
-    }
-    ws.send(JSON.stringify({ prompt: question, historys_list: [] }))
-  } catch (error) {
-    console.error(error)
-    microphoneState.value = 'input'
-    showError(`llm${JSON.stringify(error)}`)
-  }
-}
-let isTTSRunning = false
-async function runTTSTask() {
-  if (isTTSRunning) return
-  isTTSRunning = true
-  const { pose, stepResolve, stepReject } = createStep()
-  inputContext.steps.push(pose)
-  try {
-    while (inputContext.answerArray.length) {
-      const task = inputContext.answerArray.shift()
-      if (!task) break
-      if (task.text.trim().length < 1) continue
-      console.time(task.text + ' TTS: ')
-      const res = await localTTS({
-        url: settings.ttsHost,
-        text: task.text,
-        audio_path: settings.userData
-      })
-      console.log('----------------> TTS:', res[0].text)
-      console.timeEnd(task.text + ' TTS: ')
-      console.log('---------------->', res[0].text)
-      const audioPath = await uploadFile({ filePath: res[0].text })
+    await llmLoop(question);
+    stepResolve('llm')
+  } catch (error) {
+    microphoneState.value = 'input'
+    showError(`llm${error}`)
+  }
+}
+async function llmLoop(question: string) {
+  microphoneState.value = 'loading'
+  const resp = (await (await fetch(`${settings.llmUrl}/api/v1/generate`, {
+    method: 'POST',
+    headers: {
+      'Content-Type': 'application/json',
+    },
+    body: JSON.stringify({ question }),
+    mode: 'cors'
+  })).json() );
+  if (resp.results[0].status_code !== 100) {
+    throw new Error(`status_code: ${resp.results[0].status_code}; ${ JSON.stringify(resp.results) }`);
+  }
+  inputContext.steps.length = 0
+  photoRole!.answerArgs = new PhotoAnswer()
+  photoRole!.on('asyncAnswer', onAsyncAnswer)
+  let index = 0;
+  while (true) {
+    // @ts-ignore
+    if (microphoneState.value === 'input' || microphoneState.value === 'waitInput') {
+      break
+    }
+    await new Promise( resolve => setTimeout(resolve, 100))
+    const { results } = (await (await fetch(`${settings.llmUrl}/api/v1/audio`, {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+      },
+      mode: 'cors',
+      body: JSON.stringify({ question })
+    })).json() );
+    const audioList = results[0].audio_list as string[];
+    if (audioList.length === 0) continue;
+    const isEnd = audioList.at(-1) === 'stream_end';
+    if(isEnd) audioList.pop();
+    const newList = audioList.slice(index);
+    if (newList.length === 0 && isEnd) break;
+    if (newList.length === 0) continue;
+    for (let i = index; i < audioList.length; i++) {
+      console.log(results[0].text[i] +':'+ audioList[i]);
+      photoRole!.answerArgs!.answer += results[0].text[i]
+      photoRole!.answerArgs!._typingAnswer.push(...results[0].text[i].split(''))
+    }
+    index += newList.length;
+    const audioPaths = await Promise.all(newList.map(path => {
+      return uploadFile({ filePath: path })
+    }))
     // @ts-ignore
-      if (microphoneState.value === 'input') {
+    if (microphoneState.value === 'input' || microphoneState.value === 'waitInput') {
       break
     }
+    audioPaths.forEach(audioPath => {
       photoRole?.enQueue({
         taskId: photoRole.sessionId,
         audioUrl: `https://resources.laihua.com/${audioPath}`,
-        isLast: task.isLast
+        isLast: isEnd
       })
-    }
-  } catch (error) {
-    showError('tts:' + error)
-    endAudioInput().then(() => {
-      microphoneState.value = 'waitInput'
-    })
-    stepReject(JSON.stringify(error))
-  }
-  isTTSRunning = false
-  stepResolve('TTS')
-}
+    })
+    if (isEnd) break;
+  }
+}
 function uploadFile({ filePath }: { filePath: string }) {
@@ -565,24 +405,6 @@ function uploadFile({ filePath }: { filePath: string }) {
   })
 }
-const ttsAudios: HTMLAudioElement[] = []
-let isPlayRunning = false
-async function runAudioPlay() {
-  if (isPlayRunning) return
-  isPlayRunning = true
-  const audio = ttsAudios.shift()
-  if (!audio) {
-    isPlayRunning = false
-    return
-  }
-  audio.onended = () => {
-    isPlayRunning = false
-    runAudioPlay()
-  }
-  await audio.play()
-}
 async function down() {
   if (microphoneState.value === 'reply') {
     await endAudioInput()
...
@@ -34,11 +34,13 @@ const inputContext: {
   ws?: WebSocket
   voskWs?: WebSocket
   asrPartial: string
+  llmEnd: boolean
+  ttsAudios: HTMLAudioElement[]
   playingAudio?: HTMLAudioElement
-  answerArray: string[]
 } = {
   asrPartial: '',
-  answerArray: []
+  llmEnd: false,
+  ttsAudios: []
 }
 onMounted(() => {
@@ -56,30 +58,6 @@ const showError = (msg: string) => {
   errorMsg.value = msg
 }
-async function initVosk({
-  result,
-  partialResult
-}: {
-  result?: (string) => void
-  partialResult?: (string) => void
-}) {
-  const channel = new MessageChannel()
-  const model = await settings.downLoadVoskModel()
-  const recognizer = new model.KaldiRecognizer(sampleRate)
-  model.registerPort(channel.port1)
-  recognizer.setWords(true)
-  recognizer.on('result', (message) => {
-    result && result((message as ServerMessageResult).result.text)
-  })
-  recognizer.on('partialresult', (message) => {
-    partialResult && partialResult((message as ServerMessagePartialResult).result.partial)
-  })
-  return { recognizer, channel }
-}
 function analyzeMicrophoneVolume(stream: MediaStream, callback: (number) => void) {
   const audioContext = new AudioContext()
   const analyser = audioContext.createAnalyser()
@@ -109,62 +87,6 @@ function analyzeMicrophoneVolume(stream: MediaStream, callback: (number) => void
   inputContext.scriptProcessorNode = recordEventNode
 }
-async function startVoskWasmAudioInput() {
-  if (microphoneState.value === 'loading') return
-  if (microphoneState.value === 'input') {
-    endAudioInput()
-    return
-  }
-  microphoneState.value = 'loading'
-  const { recognizer, channel } = await initVosk({
-    result: onQ,
-    partialResult: (text) => {
-      // console.log('----------------> partialResult:', text)
-    }
-  })
-  sampleRate = 48000
-  const mediaStream = await navigator.mediaDevices.getUserMedia({
-    video: false,
-    audio: {
-      echoCancellation: true,
-      noiseSuppression: true,
-      channelCount: 1,
-      sampleRate
-    }
-  })
-  const audioContext = new AudioContext()
-  await audioContext.audioWorklet.addModule(
-    new URL('/vosk/recognizer-processor.js', import.meta.url)
-  )
-  const recognizerProcessor = new AudioWorkletNode(audioContext, 'recognizer-processor', {
-    channelCount: 1,
-    numberOfInputs: 1,
-    numberOfOutputs: 1
-  })
-  recognizerProcessor.port.postMessage({ action: 'init', recognizerId: recognizer.id }, [
-    channel.port2
-  ])
-  recognizerProcessor.connect(audioContext.destination)
-  const source = audioContext.createMediaStreamSource(mediaStream)
-  source.connect(recognizerProcessor)
-  await analyzeMicrophoneVolume(mediaStream, (val) => {
-    recordVolume.value = val
-  })
-  microphoneState.value = 'input'
-  inputContext.mediaStream = mediaStream
-  inputContext.audioContext = audioContext
-}
 async function startVoskWsAudioInput() {
   if (microphoneState.value === 'loading') return
@@ -253,15 +175,21 @@ function initVoskWS() {
   })
 }
-function initLLMSocket(): Promise<WebSocket> {
-  const ws = new WebSocket(settings.llmUrl)
-  return new Promise((resolve, reject) => {
-    ws.onopen = () => resolve(ws)
-    ws.onerror = reject
-  })
-}
+async function llmEnd() {
+  const resp = (await (await fetch(`${settings.llmUrl}/api/v1/interrupt`, {
+    method: 'POST',
+    headers: {
+      'Content-Type': 'application/json',
+    },
+    body: JSON.stringify({ end: 1 }),
+    mode: 'cors'
+  })).json() );
+  console.log('---------------->', resp);
+}
-function endAudioInput() {
+async function endAudioInput() {
+  await llmEnd()
   microphoneState.value = 'waitInput'
   inputContext.ws?.close()
   inputContext.mediaStream?.getTracks().forEach((track) => track.stop())
@@ -273,12 +201,11 @@ function endAudioInput() {
     inputContext.voskWs.send('{"eof" : 1}')
     inputContext.voskWs.close()
   }
-  ttsAudios.length = 0
+  inputContext.ttsAudios.length = 0
   inputContext.playingAudio?.pause()
   videos[1].value?.pause()
   videos[0].value?.pause()
   isPlayRunning = false
-  inputContext.answerArray.length = 0
 }
 const canplay = () => {
@@ -347,126 +274,104 @@ async function onQ(question: string) {
     return
   }
-  // 视频链接匹配不上,直接走大模型
   try {
-    const ws = await initLLMSocket()
-    const messageTimeout = setTimeout(() => {
-      showError('llm:timeout!')
-      endAudioInput()
-      microphoneState.value = 'waitInput'
-    }, 10000)
-    let sliceAnswer = ''
-    let answer = ''
-    let isTime = true
-    let sliceAnswerLength = 10
-    inputContext.ws = ws
-    ws.onmessage = (message) => {
-      clearTimeout(messageTimeout)
-      if (microphoneState.value === 'input') {
-        return
-      }
-      try {
-        let { text, event } = JSON.parse(message.data) as {
-          event: string
-          message_num: number
-          text: string
-        }
-        if (event === 'stream_end') {
-          inputContext.answerArray.push(sliceAnswer)
-          runTTSTask()
-          sliceAnswer = ''
-          inputContext.ws?.close()
-          console.log('----------------> answer: ', answer)
-          return
-        }
-        text = text.replace(/\u0000/g, '').trim()
-        answer += text
-        isTime && console.time('sliceAnswer')
-        isTime = false
-        const textArr = text.split('')
-        for (let i = 0; i < textArr.length; i++) {
-          const t = textArr[i]
-          sliceAnswer += t
-          if (/[。,?!;,.?!;]/.test(t) && sliceAnswer.length >= sliceAnswerLength) {
-            console.timeEnd('sliceAnswer')
-            sliceAnswerLength = settings.llmToTTSSliceLength
-            inputContext.answerArray.push(sliceAnswer)
-            runTTSTask()
-            sliceAnswer = ''
-            isTime = true
-          }
-        }
-      } catch (error) {
-        console.error(error)
-        showError(`message:${error}`)
-        microphoneState.value = 'waitInput'
-      }
-    }
-    ws.send(JSON.stringify({ prompt: question, historys_list: [] }))
-  } catch (error) {
-    console.error(error)
-    microphoneState.value = 'input'
-    showError(`llm:${JSON.stringify(error)}`)
-  }
-}
-let isTTSRunning = false
-async function runTTSTask() {
-  if (isTTSRunning) return
-  isTTSRunning = true
-  try {
-    while (inputContext.answerArray.length) {
-      const task = inputContext.answerArray.shift()
-      if (!task) break
-      if (task.trim().length < 1) continue
-      console.time(task + ' TTS: ')
-      const res = await localTTS({
-        url: settings.ttsHost,
-        text: task,
-        audio_path: settings.userData
-      })
-      console.log('----------------> TTS:', res[0].text)
-      console.timeEnd(task + ' TTS: ')
+    // 视频链接匹配不上,直接走大模型
+    await llmLoop(question);
+  } catch (error) {
+    microphoneState.value = 'input'
+    showError(`llm:${error}`)
+  }
+}
+async function llmLoop(question: string) {
+  if (!role) return;
+  microphoneState.value = 'loading'
+  const resp = (await (await fetch(`${settings.llmUrl}/api/v1/generate`, {
+    method: 'POST',
+    headers: {
+      'Content-Type': 'application/json',
+    },
+    body: JSON.stringify({ question }),
+    mode: 'cors'
+  })).json() );
+  if (resp.results[0].status_code !== 100) {
+    throw new Error(`status_code: ${resp.results[0].status_code}; ${ JSON.stringify(resp.results) }`);
+  }
+  inputContext.llmEnd = false;
+  let index = 0;
+  while (true) {
+    // @ts-ignore
+    if (microphoneState.value === 'input' || microphoneState.value === 'waitInput') {
+      break
+    }
+    await new Promise( resolve => setTimeout(resolve, 100))
+    const { results } = (await (await fetch(`${settings.llmUrl}/api/v1/audio`, {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+      },
+      mode: 'cors',
+      body: JSON.stringify({ question })
+    })).json() );
+    const audioList = results[0].audio_list as string[];
+    if (audioList.length === 0) continue;
+    const isEnd = audioList.at(-1) === 'stream_end';
+    if(isEnd) audioList.pop();
+    const newList = audioList.slice(index);
+    if (newList.length === 0 && isEnd) break;
+    if (newList.length === 0) continue;
+    for (let i = index; i < audioList.length; i++) {
+      console.log(results[0].text[i] +':'+ audioList[i]);
+    }
+    index += newList.length;
     // @ts-ignore
-      if (microphoneState.value === 'input') {
+    if (microphoneState.value === 'input' || microphoneState.value === 'waitInput') {
       break
     }
-      const audio = new Audio(`file://${res[0].text}`)
+    // inputContext.ttsAudios.push(...newList.map(path => {
+    //   const audio = new Audio(`file://${path}`)
+    //   audio.load()
+    //   return audio;
+    // }))
+    // TODO: test
+    inputContext.ttsAudios.push(...newList.map(path => {
+      const audio = new Audio(`http://192.168.1.57:6767/${path.split('\\').pop()}`)
       audio.load()
-      ttsAudios.push(audio)
+      return audio;
+    }))
     runAudioPlay()
-    }
-  } catch (error) {
-    showError(`tts:${error}`)
-    microphoneState.value = 'waitInput'
-    console.error(error)
-  }
-  isTTSRunning = false
-}
-const ttsAudios: HTMLAudioElement[] = []
+    if (isEnd) break;
+  }
+  inputContext.llmEnd = true;
+}
 let isPlayRunning = false
 async function runAudioPlay() {
   if (isPlayRunning) return
   isPlayRunning = true
-  const audio = ttsAudios.shift()
+  const audio = inputContext.ttsAudios.shift()
   if (!audio) {
     isPlayRunning = false
     videos[0].value!.pause()
-    !isTTSRunning && (microphoneState.value = 'input')
+    inputContext.llmEnd && (microphoneState.value = 'input')
     return
   }
   audio.onended = () => {
@@ -482,23 +387,9 @@ async function runAudioPlay() {
   microphoneState.value = 'reply'
 }
-// eslint-disable-next-line no-unused-vars
-async function xfTTS(text: string) {
-  const tone = settings.source.find(({ sourceId }) => settings.selectSource === sourceId)
-  if (!tone) return
-  const res = await audioAiTTS({
-    host: settings.ttsHost,
-    text,
-    speed: 3,
-    speaker: tone.sourceId,
-    provider: tone.provider
-  })
-  console.log('----------------> tts:', res)
-}
 async function down() {
   if (microphoneState.value === 'reply') {
-    endAudioInput()
+    await endAudioInput()
   }
   try {
...