Commit 7db30264 authored by ali

fix: 优化代码,修复bug (optimize code, fix bugs)

parent 71728fa5
......@@ -118,7 +118,6 @@ export default class IPCs {
static initializeChildWindow(window: BrowserWindow) {
ipcMain.on('fileUpload', async (event, path: string) => {
try {
const content = IPCs.readFile(path)
const formData = new FormData()
......@@ -131,9 +130,11 @@ export default class IPCs {
})
window.webContents.send('msgReceivedFileUploadResponse', response)
} catch (error) {
window.webContents.send('msgReceivedFileUploadResponse', { code: 500, message: JSON.stringify(error) })
window.webContents.send('msgReceivedFileUploadResponse', {
code: 500,
message: JSON.stringify(error)
})
}
})
}
}
......@@ -232,16 +232,18 @@ function initVoskWS() {
}
async function llmEnd() {
const resp = (await (await fetch(`${settings.llmUrl}/api/v1/interrupt`, {
const resp = await (
await fetch(`${settings.llmUrl}/api/v1/interrupt`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Content-Type': 'application/json'
},
body: JSON.stringify({ end: 1 }),
mode: 'cors'
})).json() );
})
).json()
console.log('---------------->', resp);
console.log('---------------->', resp)
}
async function endAudioInput() {
......@@ -279,7 +281,7 @@ const checkSteps = async () => {
}
count++
if (count >= 2) {
if (count >= 1) {
return true
}
}
......@@ -306,33 +308,37 @@ async function onQ(question: string) {
inputContext.steps.push(pose)
try {
await llmLoop(question);
await llmLoop(question)
stepResolve('llm')
} catch (error) {
microphoneState.value = 'input'
showError(`llm${error}`)
stepReject(error)
}
}
async function llmLoop(question: string) {
microphoneState.value = 'loading'
const resp = (await (await fetch(`${settings.llmUrl}/api/v1/generate`, {
const resp = await (
await fetch(`${settings.llmUrl}/api/v1/generate`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Content-Type': 'application/json'
},
body: JSON.stringify({ question }),
mode: 'cors'
})).json() );
})
).json()
if (resp.results[0].status_code !== 100) {
throw new Error(`status_code: ${resp.results[0].status_code}; ${ JSON.stringify(resp.results) }`);
throw new Error(`status_code: ${resp.results[0].status_code}; ${JSON.stringify(resp.results)}`)
}
inputContext.steps.length = 0
photoRole!.answerArgs = new PhotoAnswer()
// @ts-ignore
photoRole!.off('asyncAnswer', onAsyncAnswer)
photoRole!.on('asyncAnswer', onAsyncAnswer)
let index = 0;
let index = 0
while (true) {
// @ts-ignore
......@@ -340,45 +346,49 @@ async function llmLoop(question: string) {
break
}
await new Promise( resolve => setTimeout(resolve, 100))
await new Promise((resolve) => setTimeout(resolve, 100))
const { results } = (await (await fetch(`${settings.llmUrl}/api/v1/audio`, {
const { results } = await (
await fetch(`${settings.llmUrl}/api/v1/audio`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Content-Type': 'application/json'
},
mode: 'cors',
body: JSON.stringify({ question })
})).json() );
})
).json()
const audioList = results[0].audio_list as string[];
if (audioList.length === 0) continue;
const isEnd = audioList.at(-1) === 'stream_end';
const audioList = results[0].audio_list as string[]
if (audioList.length === 0) continue
const isEnd = audioList.at(-1) === 'stream_end'
if(isEnd) audioList.pop();
if (isEnd) audioList.pop()
const newList = audioList.slice(index);
if (newList.length === 0 && isEnd) break;
if (newList.length === 0) continue;
const newList = audioList.slice(index)
if (newList.length === 0 && isEnd) break
if (newList.length === 0) continue
for (let i = index; i < audioList.length; i++) {
console.log(results[0].text[i] +':'+ audioList[i]);
console.log(results[0].text[i] + ':' + audioList[i])
photoRole!.answerArgs!.answer += results[0].text[i]
photoRole!.answerArgs!._typingAnswer.push(...results[0].text[i].split(''))
}
index += newList.length;
index += newList.length
const audioPaths = await Promise.all(newList.map(path => {
const audioPaths = await Promise.all(
newList.map((path) => {
return uploadFile({ filePath: path })
}))
})
)
// @ts-ignore
if (microphoneState.value === 'input' || microphoneState.value === 'waitInput') {
break
}
audioPaths.forEach(audioPath => {
audioPaths.forEach((audioPath) => {
photoRole?.enQueue({
taskId: photoRole.sessionId,
audioUrl: `https://resources.laihua.com/${audioPath}`,
......@@ -386,7 +396,7 @@ async function llmLoop(question: string) {
})
})
if (isEnd) break;
if (isEnd) break
}
}
......
......@@ -176,16 +176,18 @@ function initVoskWS() {
}
async function llmEnd() {
const resp = (await (await fetch(`${settings.llmUrl}/api/v1/interrupt`, {
const resp = await (
await fetch(`${settings.llmUrl}/api/v1/interrupt`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Content-Type': 'application/json'
},
body: JSON.stringify({ end: 1 }),
mode: 'cors'
})).json() );
})
).json()
console.log('---------------->', resp);
console.log('---------------->', resp)
}
async function endAudioInput() {
......@@ -276,7 +278,7 @@ async function onQ(question: string) {
try {
// 视频链接匹配不上,直接走大模型
await llmLoop(question);
await llmLoop(question)
} catch (error) {
microphoneState.value = 'input'
showError(`llm:${error}`)
......@@ -284,24 +286,26 @@ async function onQ(question: string) {
}
async function llmLoop(question: string) {
if (!role) return;
if (!role) return
microphoneState.value = 'loading'
const resp = (await (await fetch(`${settings.llmUrl}/api/v1/generate`, {
const resp = await (
await fetch(`${settings.llmUrl}/api/v1/generate`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Content-Type': 'application/json'
},
body: JSON.stringify({ question }),
mode: 'cors'
})).json() );
})
).json()
if (resp.results[0].status_code !== 100) {
throw new Error(`status_code: ${resp.results[0].status_code}; ${ JSON.stringify(resp.results) }`);
throw new Error(`status_code: ${resp.results[0].status_code}; ${JSON.stringify(resp.results)}`)
}
inputContext.llmEnd = false;
let index = 0;
inputContext.llmEnd = false
let index = 0
while (true) {
// @ts-ignore
......@@ -309,57 +313,63 @@ async function llmLoop(question: string) {
break
}
await new Promise( resolve => setTimeout(resolve, 100))
await new Promise((resolve) => setTimeout(resolve, 100))
const { results } = (await (await fetch(`${settings.llmUrl}/api/v1/audio`, {
const { results } = await (
await fetch(`${settings.llmUrl}/api/v1/audio`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Content-Type': 'application/json'
},
mode: 'cors',
body: JSON.stringify({ question })
})).json() );
})
).json()
const audioList = results[0].audio_list as string[];
if (audioList.length === 0) continue;
const isEnd = audioList.at(-1) === 'stream_end';
const audioList = results[0].audio_list as string[]
if (audioList.length === 0) continue
const isEnd = audioList.at(-1) === 'stream_end'
if(isEnd) audioList.pop();
if (isEnd) audioList.pop()
const newList = audioList.slice(index);
if (newList.length === 0 && isEnd) break;
if (newList.length === 0) continue;
const newList = audioList.slice(index)
if (newList.length === 0 && isEnd) break
if (newList.length === 0) continue
for (let i = index; i < audioList.length; i++) {
console.log(results[0].text[i] +':'+ audioList[i]);
console.log(results[0].text[i] + ':' + audioList[i])
}
index += newList.length;
index += newList.length
// @ts-ignore
if (microphoneState.value === 'input' || microphoneState.value === 'waitInput') {
break
}
// inputContext.ttsAudios.push(...newList.map(path => {
// const audio = new Audio(`file://${path}`)
// audio.load()
// return audio;
// }))
inputContext.ttsAudios.push(
...newList.map((path) => {
const audio = new Audio(`file://${path}`)
audio.load()
return audio
})
)
// TODO: test
inputContext.ttsAudios.push(...newList.map(path => {
const audio = new Audio(`http://192.168.1.57:6767/${path.split('\\').pop()}`)
audio.load()
return audio;
}))
// inputContext.ttsAudios.push(
// ...newList.map((path) => {
// const audio = new Audio(`http://192.168.1.57:6767/${path.split('\\').pop()}`)
// audio.load()
// return audio
// })
// )
runAudioPlay()
if (isEnd) break;
if (isEnd) break
}
inputContext.llmEnd = true;
inputContext.llmEnd = true
}
let isPlayRunning = false
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment