Commit 2a0caeea authored by ali

feat: build the photo digital-human page layout; wire up the Vosk ASR speech-recognition flow end to end

- Added "*.js" to .eslintignore
- Updated editor.tabSize and cSpell.words in .vscode/settings.json
- Updated DefaultLayout.vue:
  - Reworked the script to use the Vue 3 Composition API
  - Added a router navigation guard to handle header visibility (see the sketch after this message)
- Updated HeaderLayout.vue:
  - Reworked the script to use the Vue 3 Composition API
  - Added refs for the ASR items and the ASR selection
  - Added a save function
- Updated router/index.ts:
  - Renamed MainScreen.vue to PhotoScreen.vue
  - Renamed SecondScreen.vue to VideoScreen.vue
  - Added the ShowPhoto.vue component
- Removed the MainScreen.vue and SecondScreen.vue components
- Updated screens/index.ts to export the PhotoScreen, VideoScreen, and ShowPhoto components
parent 2395eaf8
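
A minimal sketch of the header-visibility guard described above, assuming only the route meta flag isHeader that this commit introduces; routes that omit the flag keep the header visible:

import { ref } from 'vue'
import { useRouter } from 'vue-router'

const router = useRouter()
const isHeader = ref(true)

// Before each navigation, honor the target route's meta.isHeader flag;
// default to true so only routes that opt out (e.g. /show) hide the header.
router.beforeEach((to) => {
  isHeader.value = typeof to.meta.isHeader === 'boolean' ? to.meta.isHeader : true
})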
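
And a condensed sketch of the Vosk recognition flow this commit wires up in ShowPhoto.vue, assuming the vosk-browser-style API re-exported from @/renderer/plugins/asr (createModel, KaldiRecognizer) and the recognizer-processor AudioWorklet shipped under /vosk/; the model path is the one used in the diff:

import { Vosk } from '@/renderer/plugins/asr/index'
import type { ServerMessageResult } from '@/renderer/plugins/asr/index'

const sampleRate = 48000

// Load the model in a worker and bind the recognizer to a MessageChannel,
// so audio chunks flow worklet -> worker without touching the main thread.
const model = await Vosk.createModel('/vosk/models/vosk-model-small-cn-0.3.tar.gz')
const recognizer = new model.KaldiRecognizer(sampleRate)
const channel = new MessageChannel()
model.registerPort(channel.port1)
recognizer.on('result', (message) => {
  console.log((message as ServerMessageResult).result.text)
})

// Capture the microphone and feed it through the AudioWorklet processor.
const stream = await navigator.mediaDevices.getUserMedia({
  audio: { echoCancellation: true, noiseSuppression: true, channelCount: 1, sampleRate }
})
const audioContext = new AudioContext()
await audioContext.audioWorklet.addModule('/vosk/recognizer-processor.js')
const node = new AudioWorkletNode(audioContext, 'recognizer-processor', {
  channelCount: 1,
  numberOfInputs: 1,
  numberOfOutputs: 1
})
node.port.postMessage({ action: 'init', recognizerId: recognizer.id }, [channel.port2])
audioContext.createMediaStreamSource(stream).connect(node)
node.connect(audioContext.destination)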
@@ -7,3 +7,4 @@ docs/
.idea/
.vscode/
.github/
*.js
\ No newline at end of file
@@ -17,5 +17,8 @@
"editor.wordWrap": "on",
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.tabSize": 2
"editor.tabSize": 2,
"cSpell.words": [
"Vosk"
]
}
<script setup lang="tsx">
<script setup lang="ts">
import HeaderLayout from '@/renderer/components/layout/HeaderLayout.vue'
import { ref } from 'vue';
import { useRouter } from 'vue-router'
const router = useRouter()
const isHeader = ref(true);
router.beforeEach((guard) => {
isHeader.value = typeof guard.meta.isHeader === 'boolean' ? (guard.meta.isHeader as boolean) : true;
})
</script>
<template>
<v-app>
<v-layout>
<HeaderLayout />
<HeaderLayout v-if="isHeader" />
<v-main>
<slot />
</v-main>
......
<script setup lang="tsx">
import { ref } from 'vue';
import { useRoute, useRouter } from 'vue-router'
const router = useRouter()
const route: any = useRoute()
const titleKey: string = (route?.meta?.titleKey || 'title.main') as string
const handleRoute = (path: string): void => {
router.push(path)
@@ -12,10 +12,20 @@ const handleRoute = (path: string): void => {
const isCurrentRoute = (path: string): boolean => {
return path === route.path
}
const asrItems = ref([
'Web Speech API',
'Vosk Api',
'Whisper Api'
]);
const asrSelect = ref(null);
function save() {
console.log(1);
}
</script>
<template>
<v-app-bar color="primary" density="compact">
<v-app-bar-title>{{ $t(titleKey) }}</v-app-bar-title>
<v-app-bar color="#d71b1b" density="compact" class="header">
<template #append>
<v-btn
prepend-icon="mdi-home"
@@ -23,24 +33,89 @@ const isCurrentRoute = (path: string): boolean => {
:class="{ active: isCurrentRoute('/') }"
@click="handleRoute('/')"
>
{{ $t('title.main') }}
照片数字人
</v-btn>
<v-btn
prepend-icon="mdi-fit-to-screen-outline"
variant="text"
:class="{ active: isCurrentRoute('/second') }"
@click="handleRoute('/second')"
:class="{ active: isCurrentRoute('/video') }"
@click="handleRoute('/video')"
>
{{ $t('title.second') }}
视频数字人
</v-btn>
<v-dialog width="500">
<template #activator="{ props }">
<v-btn
v-bind="props"
color="#fff"
class="settings"
>
<v-icon
start
icon="mdi-wrench"
></v-icon>
设置
</v-btn>
</template>
<template #default="{ isActive }">
<v-card title="配置">
<v-sheet width="300" class="mx-auto">
<v-form ref="form">
<v-select
v-model="asrSelect"
:items="asrItems"
:rules="[v => !!v || '请选择 Asr']"
label="选择语音识别(ASR)"
required
></v-select>
<v-btn
color="success"
class="mt-4"
block
@click="save"
>
保存
</v-btn>
</v-form>
</v-sheet>
<v-card-actions>
<v-spacer></v-spacer>
<v-btn
text="关闭"
@click="isActive.value = false"
></v-btn>
</v-card-actions>
</v-card>
</template>
</v-dialog>
</template>
</v-app-bar>
</template>
<style scoped>
.v-btn {
<style>
.header .v-btn {
opacity: 0.4;
}
.active {
.header .active {
opacity: 1 !important;
}
.header .v-toolbar__content {
display: flex;
justify-content: center;
}
.header .v-toolbar__append {
margin-inline-start: revert !important;
margin-inline-end: revert !important;
}
.header .settings {
opacity: 1;
}
</style>
export * as Vosk from './vosk/vosk'
export type * from './vosk/vosk'
\ No newline at end of file
export interface ClientMessageLoad {
action: "load";
modelUrl: string;
}
export interface ClientMessageTerminate {
action: "terminate";
}
export interface ClientMessageRecognizerSet {
action: "set";
recognizerId: string;
key: "words";
value: boolean;
}
export interface ClientMessageGenericSet {
action: "set";
key: "logLevel";
value: number;
}
export declare type ClientMessageSet = ClientMessageRecognizerSet | ClientMessageGenericSet;
export interface ClientMessageAudioChunk {
action: "audioChunk";
recognizerId: string;
data: Float32Array;
sampleRate: number;
}
export interface ClientMessageCreateRecognizer {
action: "create";
recognizerId: string;
sampleRate: number;
grammar?: string;
}
export interface ClientMessageRetrieveFinalResult {
action: "retrieveFinalResult";
recognizerId: string;
}
export interface ClientMessageRemoveRecognizer {
action: "remove";
recognizerId: string;
}
export declare type ClientMessage = ClientMessageTerminate | ClientMessageLoad | ClientMessageCreateRecognizer | ClientMessageAudioChunk | ClientMessageSet | ClientMessageRetrieveFinalResult | ClientMessageRemoveRecognizer;
export declare namespace ClientMessage {
function isTerminateMessage(message: ClientMessage): message is ClientMessageTerminate;
function isLoadMessage(message: ClientMessage): message is ClientMessageLoad;
function isSetMessage(message: ClientMessage): message is ClientMessageSet;
function isAudioChunkMessage(message: ClientMessage): message is ClientMessageAudioChunk;
function isRecognizerCreateMessage(message: ClientMessage): message is ClientMessageCreateRecognizer;
function isRecognizerRetrieveFinalResultMessage(message: ClientMessage): message is ClientMessageRetrieveFinalResult;
function isRecognizerRemoveMessage(message: ClientMessage): message is ClientMessageRemoveRecognizer;
}
export interface ServerMessageLoadResult {
event: "load";
result: boolean;
}
export interface ServerMessageError {
event: "error";
recognizerId?: string;
error: string;
}
export interface ServerMessageResult {
event: "result";
recognizerId: string;
result: {
result: Array<{
conf: number;
start: number;
end: number;
word: string;
}>;
text: string;
};
}
export interface ServerMessagePartialResult {
event: "partialresult";
recognizerId: string;
result: {
partial: string;
};
}
export declare type ModelMessage = ServerMessageLoadResult | ServerMessageError;
export declare namespace ModelMessage {
function isLoadResult(message: any): message is ServerMessageLoadResult;
}
export declare type RecognizerMessage = ServerMessagePartialResult | ServerMessageResult | ServerMessageError;
export declare type RecognizerEvent = RecognizerMessage["event"];
export declare type ServerMessage = ModelMessage | RecognizerMessage;
export declare namespace ServerMessage {
function isRecognizerMessage(message: ServerMessage): message is RecognizerMessage;
function isResult(message: any): message is ServerMessageResult;
function isPartialResult(message: any): message is ServerMessagePartialResult;
}
import { ModelMessage, RecognizerEvent, RecognizerMessage } from "./interfaces";
export * from "./interfaces";
export declare class Model extends EventTarget {
private modelUrl;
private worker;
private _ready;
private messagePort;
private logger;
private recognizers;
constructor(modelUrl: string, logLevel?: number);
private initialize;
private postMessage;
private handleMessage;
on(event: ModelMessage["event"], listener: (message: ModelMessage) => void): void;
registerPort(port: MessagePort): void;
private forwardMessage;
get ready(): boolean;
terminate(): void;
setLogLevel(level: number): void;
registerRecognizer(recognizer: KaldiRecognizer): void;
unregisterRecognizer(recognizerId: string): void;
/**
* KaldiRecognizer anonymous class
*/
get KaldiRecognizer(): {
new (sampleRate: number, grammar?: string): {
id: string;
on(event: RecognizerEvent, listener: (message: RecognizerMessage) => void): void;
setWords(words: boolean): void;
acceptWaveform(buffer: AudioBuffer): void;
acceptWaveformFloat(buffer: Float32Array, sampleRate: number): void;
retrieveFinalResult(): void;
remove(): void;
addEventListener(type: string, callback: EventListenerOrEventListenerObject | null, options?: boolean | AddEventListenerOptions | undefined): void;
dispatchEvent(event: Event): boolean;
removeEventListener(type: string, callback: EventListenerOrEventListenerObject | null, options?: boolean | EventListenerOptions | undefined): void;
};
};
}
export declare type KaldiRecognizer = InstanceType<Model["KaldiRecognizer"]>;
export declare function createModel(modelUrl: string, logLevel?: number): Promise<Model>;
export declare class Logger {
private logLevel;
constructor(logLevel?: number);
getLogLevel(): number;
setLogLevel(level: number): void;
error(message: string): void;
warn(message: string): void;
info(message: string): void;
verbose(message: string): void;
debug(message: string): void;
}
import * as VoskWasm from "./vosk-wasm";
export interface Recognizer {
id: string;
buffAddr?: number;
buffSize?: number;
recognizer: VoskWasm.Recognizer;
sampleRate: number;
words?: boolean;
grammar?: string;
}
export declare class RecognizerWorker {
private Vosk;
private model;
private recognizers;
private logger;
constructor();
private handleMessage;
private load;
private allocateBuffer;
private freeBuffer;
private createRecognizer;
private setConfiguration;
private processAudioChunk;
private retrieveFinalResult;
private removeRecognizer;
private terminate;
}
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1640330134243" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="5686" width="30" height="30" xmlns:xlink="http://www.w3.org/1999/xlink"><defs><style type="text/css"></style></defs><path d="M515.626667 818.858667h-7.338667a269.653333 269.653333 0 0 1-261.589333-276.48v-87.381334a21.034667 21.034667 0 1 1 42.112 0v87.509334a227.541333 227.541333 0 0 0 219.477333 234.368h7.338667a227.541333 227.541333 0 0 0 219.477333-234.368v-76.8a21.034667 21.034667 0 1 1 42.112 0v76.8a269.653333 269.653333 0 0 1-261.589333 276.352z" fill="#000000" p-id="5687"></path><path d="M384.128 937.813333a21.077333 21.077333 0 0 1 0-42.154666h103.04v-97.834667a20.992 20.992 0 1 1 41.941333 0v97.834667h102.869334a21.077333 21.077333 0 1 1 0 42.154666z" fill="#000000" p-id="5688"></path><path d="M512 128.298667a144.810667 144.810667 0 0 1 144.810667 144.810666v267.136A144.810667 144.810667 0 0 1 512 685.056a144.810667 144.810667 0 0 1-144.810667-144.810667V273.109333A144.810667 144.810667 0 0 1 512 128.298667m0-42.112a187.136 187.136 0 0 0-186.922667 186.922666v267.136a186.922667 186.922667 0 1 0 373.845334 0V273.109333A187.136 187.136 0 0 0 512 86.186667z" fill="#000000" p-id="5689"></path></svg>
\ No newline at end of file
The Developer of the ASR models is Alpha Cephei Inc (https://alphacephei.com/e).
Copyright 2019 Alpha Cephei Inc. All Rights Reserved.
\ No newline at end of file
class RecognizerAudioProcessor extends AudioWorkletProcessor {
constructor(options) {
super(options);
this.port.onmessage = this._processMessage.bind(this);
}
_processMessage(event) {
// console.debug(`Received event ${JSON.stringify(event.data, null, 2)}`);
if (event.data.action === "init") {
this._recognizerId = event.data.recognizerId;
this._recognizerPort = event.ports[0];
}
}
process(inputs, outputs, parameters) {
const data = inputs[0][0];
if (this._recognizerPort && data) {
// AudioBuffer samples are represented as floating point numbers between -1.0 and 1.0 whilst
// Kaldi expects them to be between -32768 and 32767 (the range of a signed int16)
const audioArray = data.map((value) => value * 0x8000);
this._recognizerPort.postMessage(
{
action: "audioChunk",
data: audioArray,
recognizerId: this._recognizerId,
sampleRate, // Part of AudioWorkletGlobalScope
},
{
transfer: [audioArray.buffer],
}
);
}
return true;
}
}
registerProcessor('recognizer-processor', RecognizerAudioProcessor)
\ No newline at end of file
import { MainScreen, ErrorScreen, SecondScreen } from '@/renderer/screens'
import { PhotoScreen, ErrorScreen, VideoScreen, ShowPhoto } from '@/renderer/screens'
import { createRouter, createWebHashHistory } from 'vue-router'
export default createRouter({
@@ -6,23 +6,31 @@ export default createRouter({
routes: [
{
path: '/',
component: MainScreen,
component: PhotoScreen,
meta: {
titleKey: 'title.main'
titleKey: '照片数字人'
}
},
{
path: '/second',
component: SecondScreen,
path: '/video',
component: VideoScreen,
meta: {
titleKey: 'title.second'
titleKey: '视频数字人'
}
},
{
path: '/show',
component: ShowPhoto,
meta: {
titleKey: '展示数字人',
isHeader: false
}
},
{
path: '/error',
component: ErrorScreen,
meta: {
titleKey: 'title.error'
titleKey: '发生错误'
}
},
{
......
<script setup lang="tsx">
import { useI18n } from 'vue-i18n'
import { useTheme } from 'vuetify'
import { openExternal } from '@/renderer/utils'
import { useCounterStore } from '@/renderer/store/counter'
import { storeToRefs } from 'pinia'
import { onMounted, ref } from 'vue'
const { locale, availableLocales } = useI18n()
const { counterIncrease } = useCounterStore()
const { counter } = storeToRefs(useCounterStore())
const theme = useTheme()
const languages = ref(['en'])
const appVersion = ref('Unknown')
onMounted((): void => {
languages.value = availableLocales
// Get application version from package.json version string (Using IPC communication)
window.mainApi.receive('msgReceivedVersion', (event: Event, version: string) => {
appVersion.value = version
})
window.mainApi.send('msgRequestGetVersion')
})
const handleChangeTheme = (): void => {
theme.global.name.value = theme.global.current.value.dark ? 'light' : 'dark'
}
const handleChangeLanguage = (val): void => {
locale.value = val
}
const handleOpenDocument = async (): Promise<void> => {
await openExternal('https://vutron.jooy2.com')
}
const handleOpenGitHub = async (): Promise<void> => {
await openExternal('https://github.com/jooy2/vutron')
}
const handleCountIncrease = (): void => {
counterIncrease(1)
}
</script>
<template>
<v-container>
<v-row no-gutters align="center" class="text-center">
<v-col cols="12" md="7">
<h2 class="my-4">Hello Electron</h2>
<p>{{ $t('desc.welcome-desc') }}</p>
<p class="my-4">
App Version: <strong>{{ appVersion }}</strong>
</p>
<v-row class="my-4">
<v-col cols="3">
<v-btn icon color="primary" @click="handleChangeTheme">
<v-icon icon="mdi-brightness-6" />
<v-tooltip activator="parent" location="bottom">
{{ $t('menu.change-theme') }}
</v-tooltip>
</v-btn>
</v-col>
<v-col cols="3">
<v-badge data-testid="counter-badge" color="blue" :content="counter">
<v-btn data-testid="btn-counter" icon color="primary" @click="handleCountIncrease">
<v-icon icon="mdi-plus-circle" />
<v-tooltip activator="parent" location="bottom">
{{ $t('menu.increase-count') }}
</v-tooltip>
</v-btn>
</v-badge>
</v-col>
<v-col cols="3">
<v-btn icon color="primary" @click="handleOpenDocument">
<v-icon icon="mdi-file-document" />
<v-tooltip activator="parent" location="bottom">
{{ $t('menu.documentation') }}
</v-tooltip>
</v-btn>
</v-col>
<v-col cols="3">
<v-btn icon color="primary" @click="handleOpenGitHub">
<v-icon icon="mdi-github" />
<v-tooltip activator="parent" location="bottom">
{{ $t('menu.github') }}
</v-tooltip>
</v-btn>
</v-col>
<v-col cols="12">
<v-select
data-testid="select-language"
:model-value="locale"
density="compact"
:label="$t('menu.change-language')"
:items="languages"
@update:model-value="handleChangeLanguage"
>
{{ $t('menu.change-language') }}
</v-select>
</v-col>
</v-row>
</v-col>
</v-row>
</v-container>
</template>
<script setup lang="ts">
// import { useI18n } from 'vue-i18n'
// import { useTheme } from 'vuetify'
// import { openExternal } from '@/renderer/utils'
// import { useCounterStore } from '@/renderer/store/counter'
// import { storeToRefs } from 'pinia'
import { onMounted, ref } from 'vue'
// const { availableLocales } = useI18n()
// const { counterIncrease } = useCounterStore()
// const { counter } = storeToRefs(useCounterStore())
// const theme = useTheme()
// const languages = ref(['en'])
// const appVersion = ref('Unknown')
onMounted((): void => {
// languages.value = availableLocales
// window.mainApi.receive('msgReceivedVersion', (event: Event, version: string) => {
// appVersion.value = version
// })
// window.mainApi.send('msgRequestGetVersion')
})
const photoList = ref([
{
url: 'https://resources.laihua.com/2023-11-2/93ffb6a7-ae93-4918-944e-877016ba266b.png'
},
{
url: 'https://resources.laihua.com/2023-6-19/6fa9a127-2ce5-43ea-a543-475bf9354eda.png'
}
]);
let currentShowWin: Window | null = null;
function handleOpen(event: Event,url: string) {
if (currentShowWin) {
currentShowWin.close();
}
currentShowWin = window.open(`${location.href}show?url=${url}`, '_blank', `width=${ window.screen.width / 4 },height=${ window.screen.height },top=0,left=0,frame=false,nodeIntegration=no`);
}
</script>
<template>
<v-container class="d-flex mt-6">
<v-sheet v-for="item in photoList" :key="item.url" :elevation="3" width="200" class="d-flex align-center spacing-playground pa-6 mr-4" rounded @click="handleOpen($event, item.url)">
<v-img
:width="200"
aspect-ratio="1/1"
cover
:src="item.url"
></v-img>
</v-sheet>
</v-container>
<v-container class="d-flex mt-6">
<v-text-field label="自定义照片 url(https://xxx.png)"></v-text-field>
</v-container>
</template>
<script setup lang="ts">
import { ref } from 'vue';
import { useRoute, useRouter } from 'vue-router'
import { Vosk } from '@/renderer/plugins/asr/index'
import type { ServerMessagePartialResult, ServerMessageResult, Model } from '@/renderer/plugins/asr/index'
const router = useRouter()
const route = useRoute();
const sampleRate = 48000;
const recordVolume = ref(0);
router.beforeEach(g => {
if (!g.query.url) return router.push('/error');
})
const microphoneState = ref< 'waitInput' | 'input' | 'loading' | 'disabled'>('waitInput');
async function initVosk({ modelPath, result, partialResult }: {
modelPath: string;
result?: (string) => void;
partialResult?: (string) => void;
}) {
const channel = new MessageChannel();
const model = await Vosk.createModel(modelPath);
const recognizer = new model.KaldiRecognizer(sampleRate);
model.registerPort(channel.port1);
recognizer.setWords(true);
recognizer.on('result', (message) => {
result && result((message as ServerMessageResult).result.text)
});
recognizer.on('partialresult', (message) => {
partialResult && partialResult((message as ServerMessagePartialResult).result.partial)
});
return { recognizer, channel };
}
function analyzeMicrophoneVolume(stream: MediaStream, callback: (number) => void) {
const audioContext = new AudioContext();
const analyser = audioContext.createAnalyser();
const microphone = audioContext.createMediaStreamSource(stream);
const recordEventNode = audioContext.createScriptProcessor(2048, 1, 1);
const audioprocess = () => {
const array = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(array);
let values = 0;
const length = array.length;
for (let i = 0; i < length; i++) {
values += array[i];
}
const average = values / length;
callback(Math.round(average));
}
analyser.smoothingTimeConstant = 0.8;
analyser.fftSize = 1024;
microphone.connect(analyser);
analyser.connect(recordEventNode);
recordEventNode.connect(audioContext.destination);
// recordEventNode.addEventListener('audioprocess', audioprocess);
recordEventNode.onaudioprocess = audioprocess;
inputContext.audioContext2 = audioContext;
inputContext.scriptProcessorNode = recordEventNode;
}
const inputContext: { mediaStream?: MediaStream; audioContext?: AudioContext; audioContext2?: AudioContext; scriptProcessorNode?: ScriptProcessorNode; model?: Model } = {};
async function startAudioInput() {
if (microphoneState.value === 'loading') return;
if (microphoneState.value === 'input') {
microphoneState.value = 'waitInput';
inputContext.mediaStream?.getTracks().forEach((track) => track.stop());
inputContext.audioContext?.close();
inputContext.audioContext2?.close();
inputContext.scriptProcessorNode && (inputContext.scriptProcessorNode.onaudioprocess = null);
inputContext.model?.terminate();
return;
}
microphoneState.value = 'loading';
const { recognizer, channel } = await initVosk({
modelPath: new URL('/vosk/models/vosk-model-small-cn-0.3.tar.gz', import.meta.url).href,
result: text => {
console.log('----------------> result:', text);
},
partialResult: text => {
console.log('----------------> partialResult:', text);
},
});
const mediaStream = await navigator.mediaDevices.getUserMedia({
video: false,
audio: {
echoCancellation: true,
noiseSuppression: true,
channelCount: 1,
sampleRate
},
});
const audioContext = new AudioContext();
await audioContext.audioWorklet.addModule(new URL('/vosk/recognizer-processor.js', import.meta.url))
const recognizerProcessor = new AudioWorkletNode(audioContext, 'recognizer-processor', { channelCount: 1, numberOfInputs: 1, numberOfOutputs: 1 });
recognizerProcessor.port.postMessage({action: 'init', recognizerId: recognizer.id}, [ channel.port2 ])
recognizerProcessor.connect(audioContext.destination);
const source = audioContext.createMediaStreamSource(mediaStream);
source.connect(recognizerProcessor);
await analyzeMicrophoneVolume(mediaStream, (val) => {
recordVolume.value = val;
});
microphoneState.value = 'input';
inputContext.mediaStream = mediaStream;
inputContext.audioContext = audioContext;
}
function endAudioInput() {
console.log('----------------> end');
}
</script>
<template>
<div style="width: 100%; height: 100%;" class="d-flex justify-center align-center">
<v-img
v-if="route.query.url"
:width="'100%'"
aspect-ratio="1/1"
cover
:src="(route.query.url as string)"
></v-img>
</div>
<div class="voice">
<v-btn icon="" color="#fff" variant="elevated" size="x-large" :disabled="microphoneState === 'loading' || microphoneState ==='disabled'" @pointerdown="startAudioInput" @pointerup="endAudioInput">
<v-icon v-if="microphoneState === 'waitInput'" icon="mdi-microphone"></v-icon>
<v-icon v-if="microphoneState === 'loading'" icon="mdi-microphone-settings"></v-icon>
<v-icon v-if="microphoneState === 'disabled'" icon="mdi-microphone-off"></v-icon>
<template v-if="microphoneState === 'input'">
<img width="30" height="30" src="/images/microphone-input.svg" alt="" srcset="">
<div class="progress">
<span class="volume" :style="{ 'clip-path': `polygon(0 ${100 - recordVolume}%, 100% ${100 - recordVolume}%, 100% 100%, 0 100%)` }"></span>
</div>
</template>
</v-btn>
</div>
</template>
<style scoped>
.voice {
display: flex;
justify-content: center;
position: fixed;
left: 0;
right: 0;
top: 70%;
margin: auto;
}
.progress{
position: absolute;
top: 21px;
left: 28px;
width: 8px;
height: 16px;
overflow: hidden;
border-radius: 36%;
}
.progress .volume{
display: block;
width: 100%;
height: 100%;
background: #2FB84F;
border-radius: 36%;
}
</style>
import ErrorScreen from '@/renderer/screens/ErrorScreen.vue'
import MainScreen from '@/renderer/screens/MainScreen.vue'
import SecondScreen from '@/renderer/screens/SecondScreen.vue'
import PhotoScreen from '@/renderer/screens/PhotoScreen.vue'
import VideoScreen from '@/renderer/screens/VideoScreen.vue'
import ShowPhoto from '@/renderer/screens/ShowPhoto.vue'
export { ErrorScreen, MainScreen, SecondScreen }
export { ErrorScreen, PhotoScreen, VideoScreen, ShowPhoto }
@@ -6,6 +6,7 @@
"module": "esnext",
"moduleResolution": "node",
"jsx": "preserve",
"allowJs": true,
"noImplicitAny": false,
"allowSyntheticDefaultImports": true,
"declaration": true,
@@ -24,5 +25,12 @@
{
"path": "./tsconfig.node.json"
}
],
"exclude": [
"node_modules",
"dist",
"rollup.config.js",
"*.json",
"*.js"
]
}