Commit 2a0caeea authored by ali's avatar ali

feat: 照片数字人页面布局搭建,接入 Vosk ASR 语音识别流程跑通

- 在 .eslintignore 文件中添加 "*.js"
- 更新了 .vscode/settings.json 中的 editor.tabSize 和 cSpell.words
- 更新了 DefaultLayout.vue:
  - 修改了脚本以使用 Vue 3 组合 API
  - 添加了路由器导航保护以处理页眉可见性
- 更新了 HeaderLayout.vue:
  - 修改了脚本以使用 Vue 3 组合 API
  - 为 ASR 项目和 ASR 选择添加了 ref
  - 添加了保存功能
- 更新了 router/index.ts:
  - 将 MainScreen.vue 重命名为 PhotoScreen.vue
  - 将 SecondScreen.vue 重命名为 VideoScreen.vue
  - 添加了 ShowPhoto.vue 组件
- 删除了 MainScreen.vue 和 SecondScreen.vue 组件
- 更新了 screens/index.ts,以导出 PhotoScreen VideoScreen 和 ShowPhoto 组件
parent 2395eaf8
......@@ -7,3 +7,4 @@ docs/
.idea/
.vscode/
.github/
*.js
\ No newline at end of file
......@@ -17,5 +17,8 @@
"editor.wordWrap": "on",
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.tabSize": 2
"editor.tabSize": 2,
"cSpell.words": [
"Vosk"
]
}
<script setup lang="tsx">
<script setup lang="ts">
import HeaderLayout from '@/renderer/components/layout/HeaderLayout.vue'
import { ref } from 'vue';
import { useRouter } from 'vue-router'
const router = useRouter()
// Whether the shared header bar is rendered; routes opt out via meta.isHeader.
const isHeader = ref(true);
// Before every navigation, read the target route's meta to decide header
// visibility (defaults to visible when meta.isHeader is absent or not boolean).
router.beforeEach((guard) => {
  isHeader.value = typeof guard.meta.isHeader === 'boolean' ? (guard.meta.isHeader as boolean) : true;
})
</script>
<template>
<v-app>
<v-layout>
<HeaderLayout />
<HeaderLayout v-if="isHeader" />
<v-main>
<slot />
</v-main>
......
<script setup lang="tsx">
import { ref } from 'vue';
import { useRoute, useRouter } from 'vue-router'
const router = useRouter()
const route: any = useRoute()
const titleKey: string = (route?.meta?.titleKey || 'title.main') as string
const handleRoute = (path: string): void => {
router.push(path)
......@@ -12,10 +12,20 @@ const handleRoute = (path: string): void => {
// True when `path` is the route currently shown; used to highlight the nav button.
const isCurrentRoute = (path: string): boolean => {
  return path === route.path
}

// ASR engines offered in the settings dialog.
const asrItems = ref([
  'Web Speech API',
  'Vosk Api',
  'Whisper Api'
]);
// Currently selected ASR engine (null until the user picks one).
const asrSelect = ref(null);

// Settings dialog "save" handler.
// NOTE(review): placeholder — only logs; the selection is not persisted yet.
function save() {
  console.log(1);
}
</script>
<template>
<v-app-bar color="primary" density="compact">
<v-app-bar-title>{{ $t(titleKey) }}</v-app-bar-title>
<v-app-bar color="#d71b1b" density="compact" class="header">
<template #append>
<v-btn
prepend-icon="mdi-home"
......@@ -23,24 +33,89 @@ const isCurrentRoute = (path: string): boolean => {
:class="{ active: isCurrentRoute('/') }"
@click="handleRoute('/')"
>
{{ $t('title.main') }}
照片数字人
</v-btn>
<v-btn
prepend-icon="mdi-fit-to-screen-outline"
variant="text"
:class="{ active: isCurrentRoute('/second') }"
@click="handleRoute('/second')"
:class="{ active: isCurrentRoute('/video') }"
@click="handleRoute('/video')"
>
{{ $t('title.second') }}
视频数字人
</v-btn>
<v-dialog width="500">
<template #activator="{ props }">
<v-btn
v-bind="props"
color="#fff"
class="settings"
>
<v-icon
start
icon="mdi-wrench"
></v-icon>
设置
</v-btn>
</template>
<template #default="{ isActive }">
<v-card title="配置">
<v-sheet width="300" class="mx-auto">
<v-form ref="form">
<v-select
v-model="asrSelect"
:items="asrItems"
:rules="[v => !!v || '请选择 Asr']"
label="选择语音识别(ASR)"
required
></v-select>
<v-btn
color="success"
class="mt-4"
block
@click="save"
>
保存
</v-btn>
</v-form>
</v-sheet>
<v-card-actions>
<v-spacer></v-spacer>
<v-btn
text="关闭"
@click="isActive.value = false"
></v-btn>
</v-card-actions>
</v-card>
</template>
</v-dialog>
</template>
</v-app-bar>
</template>
<style scoped>
.v-btn {
<style>
.header .v-btn {
opacity: 0.4;
}
.active {
.header .active {
opacity: 1 !important;
}
.header .v-toolbar__content {
display: flex;
justify-content: center;
}
.header .v-toolbar__append {
margin-inline-start: revert !important;
margin-inline-end: revert !important;
}
.header .settings {
opacity: 1;
}
</style>
export * as Vosk from './vosk/vosk'
export type * from './vosk/vosk'
\ No newline at end of file
/** Client → worker: download and initialise the Vosk model at `modelUrl`. */
export interface ClientMessageLoad {
  action: "load";
  modelUrl: string;
}

/** Client → worker: shut the worker down and release all resources. */
export interface ClientMessageTerminate {
  action: "terminate";
}

/** Per-recognizer setting change; currently only toggles word-level results. */
export interface ClientMessageRecognizerSet {
  action: "set";
  recognizerId: string;
  key: "words";
  value: boolean;
}

/** Worker-wide setting change; currently only the numeric log level. */
export interface ClientMessageGenericSet {
  action: "set";
  key: "logLevel";
  value: number;
}

/** Union of the "set" messages, distinguished by their `key`. */
export declare type ClientMessageSet = ClientMessageRecognizerSet | ClientMessageGenericSet;

/** A chunk of float PCM audio for one recognizer to process. */
export interface ClientMessageAudioChunk {
  action: "audioChunk";
  recognizerId: string;
  data: Float32Array;
  sampleRate: number;
}

/** Requests creation of a recognizer with the given sample rate and optional grammar. */
export interface ClientMessageCreateRecognizer {
  action: "create";
  recognizerId: string;
  sampleRate: number;
  grammar?: string;
}

/** Asks the recognizer to flush and emit its final result. */
export interface ClientMessageRetrieveFinalResult {
  action: "retrieveFinalResult";
  recognizerId: string;
}

/** Requests disposal of the given recognizer. */
export interface ClientMessageRemoveRecognizer {
  action: "remove";
  recognizerId: string;
}

/** Every message the client may post to the worker, discriminated by `action`. */
export declare type ClientMessage = ClientMessageTerminate | ClientMessageLoad | ClientMessageCreateRecognizer | ClientMessageAudioChunk | ClientMessageSet | ClientMessageRetrieveFinalResult | ClientMessageRemoveRecognizer;

/** Type-guard helpers for narrowing a ClientMessage by its `action` tag. */
export declare namespace ClientMessage {
  function isTerminateMessage(message: ClientMessage): message is ClientMessageTerminate;
  function isLoadMessage(message: ClientMessage): message is ClientMessageLoad;
  function isSetMessage(message: ClientMessage): message is ClientMessageSet;
  function isAudioChunkMessage(message: ClientMessage): message is ClientMessageAudioChunk;
  function isRecognizerCreateMessage(message: ClientMessage): message is ClientMessageCreateRecognizer;
  function isRecognizerRetrieveFinalResultMessage(message: ClientMessage): message is ClientMessageRetrieveFinalResult;
  function isRecognizerRemoveMessage(message: ClientMessage): message is ClientMessageRemoveRecognizer;
}
/** Worker → client: reports whether the model finished loading successfully. */
export interface ServerMessageLoadResult {
  event: "load";
  result: boolean;
}

/** Worker → client: an error, optionally scoped to one recognizer. */
export interface ServerMessageError {
  event: "error";
  recognizerId?: string;
  error: string;
}

/** Worker → client: a final recognition result with per-word detail. */
export interface ServerMessageResult {
  event: "result";
  recognizerId: string;
  result: {
    result: Array<{
      conf: number;   // per-word confidence — presumably 0..1, TODO confirm against Vosk docs
      start: number;  // word start time — presumably seconds, TODO confirm
      end: number;    // word end time
      word: string;
    }>;
    text: string;     // full recognised utterance
  };
}

/** Worker → client: an interim, still-changing hypothesis for the current utterance. */
export interface ServerMessagePartialResult {
  event: "partialresult";
  recognizerId: string;
  result: {
    partial: string;
  };
}

/** Messages addressed to the model as a whole. */
export declare type ModelMessage = ServerMessageLoadResult | ServerMessageError;

/** Type-guard helpers for ModelMessage. */
export declare namespace ModelMessage {
  function isLoadResult(message: any): message is ServerMessageLoadResult;
}

/** Messages addressed to a single recognizer. */
export declare type RecognizerMessage = ServerMessagePartialResult | ServerMessageResult | ServerMessageError;

/** The `event` tag values a recognizer can emit. */
export declare type RecognizerEvent = RecognizerMessage["event"];

/** Every message the worker may post back to the client. */
export declare type ServerMessage = ModelMessage | RecognizerMessage;

/** Type-guard helpers for narrowing a ServerMessage by its `event` tag. */
export declare namespace ServerMessage {
  function isRecognizerMessage(message: ServerMessage): message is RecognizerMessage;
  function isResult(message: any): message is ServerMessageResult;
  function isPartialResult(message: any): message is ServerMessagePartialResult;
}
import { ModelMessage, RecognizerEvent, RecognizerMessage } from "./interfaces";
export * from "./interfaces";
/**
 * Client-side façade over the Vosk recognition worker.
 *
 * Owns the worker that loads the model at `modelUrl`, forwards messages
 * to/from the recognizers registered on it, and (extending EventTarget)
 * dispatches model-level "load"/"error" events.
 */
export declare class Model extends EventTarget {
  private modelUrl;
  private worker;
  private _ready;
  private messagePort;
  private logger;
  private recognizers;
  constructor(modelUrl: string, logLevel?: number);
  private initialize;
  private postMessage;
  private handleMessage;
  /** Subscribes `listener` to a model-level event ("load" or "error"). */
  on(event: ModelMessage["event"], listener: (message: ModelMessage) => void): void;
  /** Registers a MessagePort (e.g. from an AudioWorklet) whose traffic is forwarded to the worker. */
  registerPort(port: MessagePort): void;
  private forwardMessage;
  /** Presumably true once the worker reported the model loaded — TODO confirm. */
  get ready(): boolean;
  /** Shuts the worker down; the model and its recognizers become unusable. */
  terminate(): void;
  /** Sets the worker's log verbosity. */
  setLogLevel(level: number): void;
  registerRecognizer(recognizer: KaldiRecognizer): void;
  unregisterRecognizer(recognizerId: string): void;
  /**
   * KaldiRecognizer anonymous class
   */
  get KaldiRecognizer(): {
    new (sampleRate: number, grammar?: string): {
      id: string;
      /** Subscribes to "result", "partialresult" or "error" events for this recognizer. */
      on(event: RecognizerEvent, listener: (message: RecognizerMessage) => void): void;
      /** Enables/disables per-word detail in final results. */
      setWords(words: boolean): void;
      acceptWaveform(buffer: AudioBuffer): void;
      acceptWaveformFloat(buffer: Float32Array, sampleRate: number): void;
      /** Flushes the recognizer and emits a final "result" event. */
      retrieveFinalResult(): void;
      /** Unregisters and disposes this recognizer. */
      remove(): void;
      addEventListener(type: string, callback: EventListenerOrEventListenerObject | null, options?: boolean | AddEventListenerOptions | undefined): void;
      dispatchEvent(event: Event): boolean;
      removeEventListener(type: string, callback: EventListenerOrEventListenerObject | null, options?: boolean | EventListenerOptions | undefined): void;
    };
  };
}
/** Instance type of the anonymous recognizer class exposed by `Model.KaldiRecognizer`. */
export declare type KaldiRecognizer = InstanceType<Model["KaldiRecognizer"]>;

/** Loads the model at `modelUrl` in a worker and resolves once it is ready. */
export declare function createModel(modelUrl: string, logLevel?: number): Promise<Model>;

/** Minimal leveled logger used by the model/worker plumbing. */
export declare class Logger {
  private logLevel;
  constructor(logLevel?: number);
  getLogLevel(): number;
  setLogLevel(level: number): void;
  error(message: string): void;
  warn(message: string): void;
  info(message: string): void;
  verbose(message: string): void;
  debug(message: string): void;
}
This source diff could not be displayed because it is too large. You can view the blob instead.
import * as VoskWasm from "./vosk-wasm";
/** Worker-side bookkeeping for one native recognizer instance. */
export interface Recognizer {
  id: string;
  buffAddr?: number;  // presumably an address in the WASM heap for staging audio — TODO confirm
  buffSize?: number;  // size of that staging buffer — unit (bytes vs samples) unverified
  recognizer: VoskWasm.Recognizer;
  sampleRate: number;
  words?: boolean;    // whether word-level detail was requested
  grammar?: string;   // optional grammar restricting recognition
}

/**
 * Worker-side counterpart of Model: handles ClientMessages, drives the
 * Vosk WASM module, and manages per-recognizer audio buffers.
 */
export declare class RecognizerWorker {
  private Vosk;
  private model;
  private recognizers;
  private logger;
  constructor();
  private handleMessage;
  private load;
  private allocateBuffer;
  private freeBuffer;
  private createRecognizer;
  private setConfiguration;
  private processAudioChunk;
  private retrieveFinalResult;
  private removeRecognizer;
  private terminate;
}
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1640330134243" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="5686" width="30" height="30" xmlns:xlink="http://www.w3.org/1999/xlink"><defs><style type="text/css"></style></defs><path d="M515.626667 818.858667h-7.338667a269.653333 269.653333 0 0 1-261.589333-276.48v-87.381334a21.034667 21.034667 0 1 1 42.112 0v87.509334a227.541333 227.541333 0 0 0 219.477333 234.368h7.338667a227.541333 227.541333 0 0 0 219.477333-234.368v-76.8a21.034667 21.034667 0 1 1 42.112 0v76.8a269.653333 269.653333 0 0 1-261.589333 276.352z" fill="#000000" p-id="5687"></path><path d="M384.128 937.813333a21.077333 21.077333 0 0 1 0-42.154666h103.04v-97.834667a20.992 20.992 0 1 1 41.941333 0v97.834667h102.869334a21.077333 21.077333 0 1 1 0 42.154666z" fill="#000000" p-id="5688"></path><path d="M512 128.298667a144.810667 144.810667 0 0 1 144.810667 144.810666v267.136A144.810667 144.810667 0 0 1 512 685.056a144.810667 144.810667 0 0 1-144.810667-144.810667V273.109333A144.810667 144.810667 0 0 1 512 128.298667m0-42.112a187.136 187.136 0 0 0-186.922667 186.922666v267.136a186.922667 186.922667 0 1 0 373.845334 0V273.109333A187.136 187.136 0 0 0 512 86.186667z" fill="#000000" p-id="5689"></path></svg>
\ No newline at end of file
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
The Developer of the ASR models is Alpha Cephei Inc (https://alphacephei.com/e).
Copyright 2019 Alpha Cephei Inc. All Rights Reserved.
\ No newline at end of file
/**
 * AudioWorklet processor that forwards microphone samples to a Vosk
 * recognizer worker over a dedicated MessagePort.
 *
 * The main thread first posts an "init" message carrying the recognizer id
 * and a MessagePort; after that, every render quantum is scaled to the
 * int16-style range Kaldi expects and transferred to that port.
 */
class RecognizerAudioProcessor extends AudioWorkletProcessor {
  constructor(options) {
    super(options);
    // All control traffic from the main thread arrives on this.port.
    this.port.onmessage = this._processMessage.bind(this);
  }

  _processMessage(event) {
    // console.debug(`Received event ${JSON.stringify(event.data, null, 2)}`);
    const message = event.data;
    if (message.action === "init") {
      this._recognizerId = message.recognizerId;
      this._recognizerPort = event.ports[0];
    }
  }

  process(inputs, outputs, parameters) {
    const samples = inputs[0][0]; // first channel of the first input
    if (!this._recognizerPort || !samples) {
      return true; // keep the processor alive even while idle/uninitialised
    }
    // AudioBuffer samples are floating point numbers between -1.0 and 1.0 whilst
    // Kaldi expects them to be between -32768 and 32767 (the range of a signed int16)
    const scaled = samples.map((sample) => sample * 0x8000);
    this._recognizerPort.postMessage(
      {
        action: "audioChunk",
        data: scaled,
        recognizerId: this._recognizerId,
        sampleRate, // Part of AudioWorkletGlobalScope
      },
      { transfer: [scaled.buffer] }
    );
    return true;
  }
}

registerProcessor('recognizer-processor', RecognizerAudioProcessor)
\ No newline at end of file
import { MainScreen, ErrorScreen, SecondScreen } from '@/renderer/screens'
import { PhotoScreen, ErrorScreen, VideoScreen, ShowPhoto } from '@/renderer/screens'
import { createRouter, createWebHashHistory } from 'vue-router'
export default createRouter({
......@@ -6,23 +6,31 @@ export default createRouter({
routes: [
{
path: '/',
component: MainScreen,
component: PhotoScreen,
meta: {
titleKey: 'title.main'
titleKey: '照片数字人'
}
},
{
path: '/second',
component: SecondScreen,
path: '/video',
component: VideoScreen,
meta: {
titleKey: 'title.second'
titleKey: '视频数字人'
}
},
{
path: '/show',
component: ShowPhoto,
meta: {
titleKey: '展示数字人',
isHeader: false
}
},
{
path: '/error',
component: ErrorScreen,
meta: {
titleKey: 'title.error'
titleKey: '发生错误'
}
},
{
......
<script setup lang="tsx">
// Template demo screen: theme/locale switching, app version over IPC, and a
// Pinia-backed counter.
import { useI18n } from 'vue-i18n'
import { useTheme } from 'vuetify'
import { openExternal } from '@/renderer/utils'
import { useCounterStore } from '@/renderer/store/counter'
import { storeToRefs } from 'pinia'
import { onMounted, ref } from 'vue'

const { locale, availableLocales } = useI18n()
const { counterIncrease } = useCounterStore()
const { counter } = storeToRefs(useCounterStore())
const theme = useTheme()
const languages = ref(['en'])
const appVersion = ref('Unknown')

onMounted((): void => {
  languages.value = availableLocales
  // Get application version from package.json version string (Using IPC communication)
  window.mainApi.receive('msgReceivedVersion', (event: Event, version: string) => {
    appVersion.value = version
  })
  window.mainApi.send('msgRequestGetVersion')
})

// Flip between the light and dark Vuetify theme.
const handleChangeTheme = (): void => {
  theme.global.name.value = theme.global.current.value.dark ? 'light' : 'dark'
}

// Switch the i18n locale to the value picked in the language select.
const handleChangeLanguage = (val): void => {
  locale.value = val
}

// Open external links in the system browser rather than inside the app window.
const handleOpenDocument = async (): Promise<void> => {
  await openExternal('https://vutron.jooy2.com')
}

const handleOpenGitHub = async (): Promise<void> => {
  await openExternal('https://github.com/jooy2/vutron')
}

// Bump the shared Pinia counter by one.
const handleCountIncrease = (): void => {
  counterIncrease(1)
}
</script>
<template>
<v-container>
<v-row no-gutters align="center" class="text-center">
<v-col cols="12" md="7">
<h2 class="my-4">Hello Electron</h2>
<p>{{ $t('desc.welcome-desc') }}</p>
<p class="my-4">
App Version: <strong>{{ appVersion }}</strong>
</p>
<v-row class="my-4">
<v-col cols="3">
<v-btn icon color="primary" @click="handleChangeTheme">
<v-icon icon="mdi-brightness-6" />
<v-tooltip activator="parent" location="bottom">
{{ $t('menu.change-theme') }}
</v-tooltip>
</v-btn>
</v-col>
<v-col cols="3">
<v-badge data-testid="counter-badge" color="blue" :content="counter">
<v-btn data-testid="btn-counter" icon color="primary" @click="handleCountIncrease">
<v-icon icon="mdi-plus-circle" />
<v-tooltip activator="parent" location="bottom">
{{ $t('menu.increase-count') }}
</v-tooltip>
</v-btn>
</v-badge>
</v-col>
<v-col cols="3">
<v-btn icon color="primary" @click="handleOpenDocument">
<v-icon icon="mdi-file-document" />
<v-tooltip activator="parent" location="bottom">
{{ $t('menu.documentation') }}
</v-tooltip>
</v-btn>
</v-col>
<v-col cols="3">
<v-btn icon color="primary" @click="handleOpenGitHub">
<v-icon icon="mdi-github" />
<v-tooltip activator="parent" location="bottom">
{{ $t('menu.github') }}
</v-tooltip>
</v-btn>
</v-col>
<v-col cols="12">
<v-select
data-testid="select-language"
:model-value="locale"
density="compact"
:label="$t('menu.change-language')"
:items="languages"
@update:model-value="handleChangeLanguage"
>
{{ $t('menu.change-language') }}
</v-select>
</v-col>
</v-row>
</v-col>
</v-row>
</v-container>
</template>
<script setup lang="ts">
// import { useI18n } from 'vue-i18n'
// import { useTheme } from 'vuetify'
// import { openExternal } from '@/renderer/utils'
// import { useCounterStore } from '@/renderer/store/counter'
// import { storeToRefs } from 'pinia'
import { onMounted, ref } from 'vue'
// const { availableLocales } = useI18n()
// const { counterIncrease } = useCounterStore()
// const { counter } = storeToRefs(useCounterStore())
// const theme = useTheme()
// const languages = ref(['en'])
// const appVersion = ref('Unknown')
onMounted((): void => {
// languages.value = availableLocales
// window.mainApi.receive('msgReceivedVersion', (event: Event, version: string) => {
// appVersion.value = version
// })
// window.mainApi.send('msgRequestGetVersion')
})
// Preset photo avatars the user can pick from.
const photoList = ref([
  {
    url: 'https://resources.laihua.com/2023-11-2/93ffb6a7-ae93-4918-944e-877016ba266b.png'
  },
  {
    url: 'https://resources.laihua.com/2023-6-19/6fa9a127-2ce5-43ea-a543-475bf9354eda.png'
  }
]);

// Handle of the most recently opened "show" window, so at most one is open.
let currentShowWin: Window | null = null;

/**
 * Opens (or replaces) the presentation window for the clicked photo.
 *
 * @param event - click event from the template (unused; kept for the caller's signature)
 * @param url - absolute URL of the photo to display
 */
function handleOpen(event: Event, url: string) {
  // Only one presentation window at a time: close the previous one first.
  if (currentShowWin) {
    currentShowWin.close();
  }
  // Encode the photo URL so its own query string / special characters cannot
  // corrupt the `url` parameter that the /show route reads back.
  currentShowWin = window.open(
    `${location.href}show?url=${encodeURIComponent(url)}`,
    '_blank',
    `width=${window.screen.width / 4},height=${window.screen.height},top=0,left=0,frame=false,nodeIntegration=no`
  );
}
</script>
<template>
<v-container class="d-flex mt-6">
<v-sheet v-for="item in photoList" :key="item.url" :elevation="3" width="200" class="d-flex align-center spacing-playground pa-6 mr-4" rounded @click="handleOpen($event, item.url)">
<v-img
:width="200"
aspect-ratio="1/1"
cover
:src="item.url"
></v-img>
</v-sheet>
</v-container>
<v-container class="d-flex mt-6">
<v-text-field label="自定义照片 url(https://xxx.png)"></v-text-field>
</v-container>
</template>
<script setup lang="ts">
import { ref } from 'vue';
import { useRoute, useRouter } from 'vue-router'
import { Vosk } from '@/renderer/plugins/asr/index'
import type { ServerMessagePartialResult, ServerMessageResult, Model } from '@/renderer/plugins/asr/index'
const router = useRouter()
const route = useRoute();
// Capture sample rate requested from getUserMedia and used to create the recognizer.
const sampleRate = 48000;
// Microphone level for the on-screen meter (average of 0-255 frequency bins).
const recordVolume = ref(0);
// This screen needs a `url` query param pointing at the photo; otherwise go to /error.
// NOTE(review): this registers a *global* router guard from component setup — it
// stays installed after this screen unmounts; verify that is intentional.
router.beforeEach(g => {
  if (!g.query.url) return router.push('/error');
})
// UI state of the push-to-talk microphone button.
const microphoneState = ref< 'waitInput' | 'input' | 'loading' | 'disabled'>('waitInput');
/**
 * Creates a Vosk model + recognizer pair wired for streaming recognition.
 *
 * @param modelPath - URL of the model archive to load
 * @param result - invoked with the final text each time an utterance completes
 * @param partialResult - invoked with the interim hypothesis while speaking
 * @returns the recognizer, the MessageChannel whose port2 feeds audio to it,
 *          and the model itself (so callers can terminate() the worker later)
 */
async function initVosk({ modelPath, result, partialResult }: {
  modelPath: string;
  result?: (text: string) => void;
  partialResult?: (text: string) => void;
}) {
  const channel = new MessageChannel();
  const model = await Vosk.createModel(modelPath);
  const recognizer = new model.KaldiRecognizer(sampleRate);

  // port1 stays with the model worker; port2 is handed to the audio worklet.
  model.registerPort(channel.port1);
  // Request word-level detail in final results.
  recognizer.setWords(true);

  recognizer.on('result', (message) => {
    result && result((message as ServerMessageResult).result.text)
  });
  recognizer.on('partialresult', (message) => {
    partialResult && partialResult((message as ServerMessagePartialResult).result.partial)
  });

  // Also expose `model`: previously it was discarded here, which left
  // inputContext.model unset and made its terminate() call unreachable.
  return { recognizer, channel, model };
}
/**
 * Taps the given media stream and periodically reports a coarse input level.
 *
 * The callback receives the rounded average of the analyser's byte-frequency
 * data (each bin 0-255), once per 2048-sample ScriptProcessor buffer.
 *
 * Side effect: stashes the created AudioContext and ScriptProcessorNode on
 * `inputContext` so startAudioInput() can tear them down on stop.
 */
function analyzeMicrophoneVolume(stream: MediaStream, callback: (number) => void) {
  const audioContext = new AudioContext();
  const analyser = audioContext.createAnalyser();
  const microphone = audioContext.createMediaStreamSource(stream);
  // NOTE(review): ScriptProcessorNode is deprecated in the Web Audio spec; an
  // AudioWorklet (as already used for recognition) would be the modern route.
  const recordEventNode = audioContext.createScriptProcessor(2048, 1, 1);

  // Average all frequency bins into a single 0-255 level for the UI meter.
  const audioprocess = () => {
    const array = new Uint8Array(analyser.frequencyBinCount);
    analyser.getByteFrequencyData(array);
    let values = 0;
    const length = array.length;
    for (let i = 0; i < length; i++) {
      values += array[i];
    }
    const average = values / length;
    callback(Math.round(average));
  }

  analyser.smoothingTimeConstant = 0.8;
  analyser.fftSize = 1024;

  // Wire mic -> analyser -> script processor -> destination. The destination
  // connection presumably keeps the processor running — verify per browser.
  microphone.connect(analyser);
  analyser.connect(recordEventNode);
  recordEventNode.connect(audioContext.destination);
  // recordEventNode.addEventListener('audioprocess', audioprocess);
  recordEventNode.onaudioprocess = audioprocess;

  inputContext.audioContext2 = audioContext;
  inputContext.scriptProcessorNode = recordEventNode;
}
// Handles to everything created by startAudioInput(), kept so the next toggle
// press can tear them down.
const inputContext: { mediaStream?: MediaStream; audioContext?: AudioContext; audioContext2?: AudioContext; scriptProcessorNode?: ScriptProcessorNode; model?: Model } = {};

/**
 * Push-to-talk toggle. First press: loads the Vosk model, opens the
 * microphone and streams audio into the recognizer via an AudioWorklet.
 * Press while recording: stops the stream and releases the audio graph.
 */
async function startAudioInput() {
  if (microphoneState.value === 'loading') return;

  // Currently recording -> stop and clean everything up.
  if (microphoneState.value === 'input') {
    microphoneState.value = 'waitInput';
    inputContext.mediaStream?.getTracks().forEach((track) => track.stop());
    inputContext.audioContext?.close();
    inputContext.audioContext2?.close();
    inputContext.scriptProcessorNode && (inputContext.scriptProcessorNode.onaudioprocess = null);
    // NOTE(review): inputContext.model is never assigned anywhere in this
    // file — the model created inside initVosk() is discarded — so this
    // terminate() is currently a no-op; verify the worker is not leaked.
    inputContext.model?.terminate();
    return;
  }

  microphoneState.value = 'loading';

  // Recognition results are only logged for now (flow bring-up).
  const { recognizer, channel } = await initVosk({
    modelPath: new URL('/vosk/models/vosk-model-small-cn-0.3.tar.gz', import.meta.url).href,
    result: text => {
      console.log('----------------> result:', text);
    },
    partialResult: text => {
      console.log('----------------> partialResult:', text);
    },
  });

  // Mono capture at the fixed sampleRate the recognizer was created with.
  const mediaStream = await navigator.mediaDevices.getUserMedia({
    video: false,
    audio: {
      echoCancellation: true,
      noiseSuppression: true,
      channelCount: 1,
      sampleRate
    },
  });

  const audioContext = new AudioContext();
  // The worklet forwards PCM chunks to the recognizer through channel.port2.
  await audioContext.audioWorklet.addModule(new URL('/vosk/recognizer-processor.js', import.meta.url))
  const recognizerProcessor = new AudioWorkletNode(audioContext, 'recognizer-processor', { channelCount: 1, numberOfInputs: 1, numberOfOutputs: 1 });
  recognizerProcessor.port.postMessage({action: 'init', recognizerId: recognizer.id}, [ channel.port2 ])
  recognizerProcessor.connect(audioContext.destination);

  const source = audioContext.createMediaStreamSource(mediaStream);
  source.connect(recognizerProcessor);

  // Drive the on-screen volume meter from the same stream.
  await analyzeMicrophoneVolume(mediaStream, (val) => {
    recordVolume.value = val;
  });

  microphoneState.value = 'input';
  inputContext.mediaStream = mediaStream;
  inputContext.audioContext = audioContext;
}

// Pointer-up handler for the mic button; currently only logs (recording is
// actually stopped by toggling startAudioInput again).
function endAudioInput() {
  console.log('----------------> end');
}
</script>
<template>
<div style="width: 100%; height: 100%;" class="d-flex justify-center align-center">
<v-img
v-if="route.query.url"
:width="'100%'"
aspect-ratio="1/1"
cover
:src="(route.query.url as string)"
></v-img>
</div>
<div class="voice">
<v-btn icon="" color="#fff" variant="elevated" size="x-large" :disabled="microphoneState === 'loading' || microphoneState ==='disabled'" @pointerdown="startAudioInput" @pointerup="endAudioInput">
<v-icon v-if="microphoneState === 'waitInput'" icon="mdi-microphone"></v-icon>
<v-icon v-if="microphoneState === 'loading'" icon="mdi-microphone-settings"></v-icon>
<v-icon v-if="microphoneState === 'disabled'" icon="mdi-microphone-off"></v-icon>
<template v-if="microphoneState === 'input'">
<img width="30" height="30" src="/images/microphone-input.svg" alt="" srcset="">
<div class="progress">
<span class="volume" :style="{ 'clip-path': `polygon(0 ${100 - recordVolume}%, 100% ${100 - recordVolume}%, 100% 100%, 0 100%)` }"></span>
</div>
</template>
</v-btn>
</div>
</template>
<style scoped>
.voice {
display: flex;
justify-content: center;
position: fixed;
left: 0;
right: 0;
top: 70%;
margin: auto;
}
.progress{
position: absolute;
top: 21px;
left: 28px;
width: 8px;
height: 16px;
overflow: hidden;
border-radius: 36%;
}
.progress .volume{
display: block;
width: 100%;
height: 100%;
background: #2FB84F;
border-radius: 36%;
}
</style>
import ErrorScreen from '@/renderer/screens/ErrorScreen.vue'
import MainScreen from '@/renderer/screens/MainScreen.vue'
import SecondScreen from '@/renderer/screens/SecondScreen.vue'
import PhotoScreen from '@/renderer/screens/PhotoScreen.vue'
import VideoScreen from '@/renderer/screens/VideoScreen.vue'
import ShowPhoto from '@/renderer/screens/ShowPhoto.vue'
export { ErrorScreen, MainScreen, SecondScreen }
export { ErrorScreen, PhotoScreen, VideoScreen, ShowPhoto }
......@@ -6,6 +6,7 @@
"module": "esnext",
"moduleResolution": "node",
"jsx": "preserve",
"allowJs": true,
"noImplicitAny": false,
"allowSyntheticDefaultImports": true,
"declaration": true,
......@@ -24,5 +25,12 @@
{
"path": "./tsconfig.node.json"
}
],
"exclude": [
"node_modules",
"dist",
"rollup.config.js",
"*.json",
"*.js"
]
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment