feat(voice-transcribe): 新增语音转写语言过滤配置功能(支持用户自定义允许的转写语言),优化模型下载的超时处理与进度日志,提升下载稳健性,同步更新相关 UI 样式。

This commit is contained in:
Forrest
2026-01-17 19:54:31 +08:00
parent dc12df0fcf
commit 0853e049c8
9 changed files with 338 additions and 66 deletions

View File

@@ -23,6 +23,8 @@ interface ConfigSchema {
whisperModelName: string
whisperModelDir: string
whisperDownloadSource: string
autoTranscribeVoice: boolean
transcribeLanguages: string[]
}
export class ConfigService {
@@ -48,7 +50,9 @@ export class ConfigService {
llmModelPath: '',
whisperModelName: 'base',
whisperModelDir: '',
whisperDownloadSource: 'tsinghua'
whisperDownloadSource: 'tsinghua',
autoTranscribeVoice: false,
transcribeLanguages: ['zh']
}
})
}

View File

@@ -123,7 +123,16 @@ export class VoiceTranscribeService {
const tokensPath = this.resolveModelPath(SENSEVOICE_MODEL.files.tokens)
const vadPath = this.resolveModelPath((SENSEVOICE_MODEL.files as any).vad)
// 初始进度
onProgress?.({
modelName: SENSEVOICE_MODEL.name,
downloadedBytes: 0,
totalBytes: SENSEVOICE_MODEL.sizeBytes,
percent: 0
})
// 下载模型文件 (40%)
console.info('[VoiceTranscribe] 开始下载模型文件...')
await this.downloadToFile(
MODEL_DOWNLOAD_URLS.model,
modelPath,
@@ -140,6 +149,7 @@ export class VoiceTranscribeService {
)
// 下载 tokens 文件 (30%)
console.info('[VoiceTranscribe] 开始下载 tokens 文件...')
await this.downloadToFile(
MODEL_DOWNLOAD_URLS.tokens,
tokensPath,
@@ -157,6 +167,7 @@ export class VoiceTranscribeService {
)
// 下载 vad 文件 (30%)
console.info('[VoiceTranscribe] 开始下载 VAD 文件...')
await this.downloadToFile(
(MODEL_DOWNLOAD_URLS as any).vad,
vadPath,
@@ -174,6 +185,7 @@ export class VoiceTranscribeService {
}
)
console.info('[VoiceTranscribe] 所有文件下载完成')
return { success: true, modelPath, tokensPath }
} catch (error) {
const modelPath = this.resolveModelPath(SENSEVOICE_MODEL.files.model)
@@ -199,7 +211,8 @@ export class VoiceTranscribeService {
*/
async transcribeWavBuffer(
wavData: Buffer,
onPartial?: (text: string) => void
onPartial?: (text: string) => void,
languages?: string[]
): Promise<{ success: boolean; transcript?: string; error?: string }> {
return new Promise((resolve) => {
try {
@@ -211,6 +224,16 @@ export class VoiceTranscribeService {
return
}
// 获取配置的语言列表,如果没有传入则从配置读取
let supportedLanguages = languages
if (!supportedLanguages || supportedLanguages.length === 0) {
supportedLanguages = this.configService.get('transcribeLanguages')
// 如果配置中也没有或为空,使用默认值
if (!supportedLanguages || supportedLanguages.length === 0) {
supportedLanguages = ['zh']
}
}
const { Worker } = require('worker_threads')
// main.js 和 transcribeWorker.js 同在 dist-electron 目录下
const workerPath = join(__dirname, 'transcribeWorker.js')
@@ -220,7 +243,8 @@ export class VoiceTranscribeService {
modelPath,
tokensPath,
wavData,
sampleRate: 16000
sampleRate: 16000,
languages: supportedLanguages
}
})
@@ -273,10 +297,13 @@ export class VoiceTranscribeService {
const options = {
headers: {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
}
},
timeout: 30000 // 30秒连接超时
}
const request = protocol.get(url, options, (response) => {
console.info(`[VoiceTranscribe] ${fileName} 响应状态:`, response.statusCode)
// 处理重定向
if ([301, 302, 303, 307, 308].includes(response.statusCode || 0) && response.headers.location) {
if (remainingRedirects <= 0) {
@@ -297,25 +324,44 @@ export class VoiceTranscribeService {
const totalBytes = Number(response.headers['content-length'] || 0) || undefined
let downloadedBytes = 0
console.info(`[VoiceTranscribe] ${fileName} 文件大小:`, totalBytes ? `${(totalBytes / 1024 / 1024).toFixed(2)} MB` : '未知')
const writer = createWriteStream(targetPath)
// 设置数据接收超时60秒没有数据则超时
let lastDataTime = Date.now()
const dataTimeout = setInterval(() => {
if (Date.now() - lastDataTime > 60000) {
clearInterval(dataTimeout)
response.destroy()
writer.close()
reject(new Error('下载超时60秒内未收到数据'))
}
}, 5000)
response.on('data', (chunk) => {
lastDataTime = Date.now()
downloadedBytes += chunk.length
onProgress?.(downloadedBytes, totalBytes)
})
response.on('error', (error) => {
clearInterval(dataTimeout)
try { writer.close() } catch { }
console.error(`[VoiceTranscribe] ${fileName} 响应错误:`, error)
reject(error)
})
writer.on('error', (error) => {
clearInterval(dataTimeout)
try { writer.close() } catch { }
console.error(`[VoiceTranscribe] ${fileName} 写入错误:`, error)
reject(error)
})
writer.on('finish', () => {
clearInterval(dataTimeout)
writer.close()
console.info(`[VoiceTranscribe] ${fileName} 下载完成:`, targetPath)
resolve()
@@ -324,8 +370,14 @@ export class VoiceTranscribeService {
response.pipe(writer)
})
request.on('timeout', () => {
request.destroy()
console.error(`[VoiceTranscribe] ${fileName} 连接超时`)
reject(new Error('连接超时'))
})
request.on('error', (error) => {
console.error(`[VoiceTranscribe] ${fileName} 下载错误:`, error)
console.error(`[VoiceTranscribe] ${fileName} 请求错误:`, error)
reject(error)
})
})

View File

@@ -6,6 +6,43 @@ interface WorkerParams {
tokensPath: string
wavData: Buffer
sampleRate: number
languages?: string[]
}
// Language tag map: ISO-style language code -> language tag emitted by the
// SenseVoice recognizer in result.lang (e.g. '<|zh|>').
const LANGUAGE_TAGS: Record<string, string> = {
  'zh': '<|zh|>',
  'en': '<|en|>',
  'ja': '<|ja|>',
  'ko': '<|ko|>',
  'yue': '<|yue|>' // Cantonese
}

/**
 * Check whether a recognition result's detected language is allowed.
 *
 * @param result - recognizer result object; only `result.lang` is read here
 *                 (expected to be a tag like '<|zh|>').
 * @param allowedLanguages - whitelist of language codes (keys of LANGUAGE_TAGS).
 * @returns true when the result should be kept, false when it must be filtered.
 */
function isLanguageAllowed(result: any, allowedLanguages: string[]): boolean {
  // No language info on the result — keep the text rather than dropping it.
  if (!result || !result.lang) {
    return true
  }
  // An empty/missing whitelist falls back to Chinese-only, matching the
  // defaults used by the service and worker. Use a local variable instead of
  // reassigning the parameter.
  const whitelist = (allowedLanguages && allowedLanguages.length > 0) ? allowedLanguages : ['zh']
  const langTag: string = result.lang
  console.log('[TranscribeWorker] 检测到语言标记:', langTag)
  // Find the whitelist entry whose mapped tag matches the detected tag.
  const matchedLang = whitelist.find((lang) => LANGUAGE_TAGS[lang] === langTag)
  if (matchedLang !== undefined) {
    console.log('[TranscribeWorker] 语言匹配,允许:', matchedLang)
    return true
  }
  console.log('[TranscribeWorker] 语言不在白名单中,过滤掉')
  return false
}
async function run() {
@@ -23,8 +60,16 @@ async function run() {
return;
}
const { modelPath, tokensPath, wavData: rawWavData, sampleRate } = workerData as WorkerParams
const { modelPath, tokensPath, wavData: rawWavData, sampleRate, languages } = workerData as WorkerParams
const wavData = Buffer.from(rawWavData);
// 确保有有效的语言列表,默认只允许中文
let allowedLanguages = languages || ['zh']
if (allowedLanguages.length === 0) {
allowedLanguages = ['zh']
}
console.log('[TranscribeWorker] 使用的语言白名单:', allowedLanguages)
// 1. 初始化识别器 (SenseVoiceSmall)
const recognizerConfig = {
modelConfig: {
@@ -66,7 +111,16 @@ async function run() {
recognizer.decode(stream)
const result = recognizer.getResult(stream)
parentPort.postMessage({ type: 'final', text: result.text })
console.log('[TranscribeWorker] 非VAD模式 - 识别结果对象:', JSON.stringify(result, null, 2))
// 检查语言是否在白名单中
if (isLanguageAllowed(result, allowedLanguages)) {
console.log('[TranscribeWorker] 非VAD模式 - 保留文本:', result.text)
parentPort.postMessage({ type: 'final', text: result.text })
} else {
console.log('[TranscribeWorker] 非VAD模式 - 语言不匹配,返回空文本')
parentPort.postMessage({ type: 'final', text: '' })
}
return
}
@@ -100,13 +154,18 @@ async function run() {
recognizer.decode(stream)
const result = recognizer.getResult(stream)
if (result.text) {
const text = result.text.trim();
console.log('[TranscribeWorker] 识别结果 - lang:', result.lang, 'text:', result.text)
// 检查语言是否在白名单中
if (result.text && isLanguageAllowed(result, allowedLanguages)) {
const text = result.text.trim()
if (text.length > 0) {
accumulatedText += (accumulatedText ? ' ' : '') + text
segmentCount++;
parentPort.postMessage({ type: 'partial', text: accumulatedText })
}
} else if (result.text) {
console.log('[TranscribeWorker] 跳过不匹配的语言段落')
}
vad.pop()
}
@@ -124,9 +183,16 @@ async function run() {
stream.acceptWaveform({ sampleRate, samples: segment.samples })
recognizer.decode(stream)
const result = recognizer.getResult(stream)
if (result.text) {
accumulatedText += (accumulatedText ? ' ' : '') + result.text.trim()
parentPort.postMessage({ type: 'partial', text: accumulatedText })
console.log('[TranscribeWorker] flush阶段 - lang:', result.lang, 'text:', result.text)
// 检查语言是否在白名单中
if (result.text && isLanguageAllowed(result, allowedLanguages)) {
const text = result.text.trim()
if (text) {
accumulatedText += (accumulatedText ? ' ' : '') + text
parentPort.postMessage({ type: 'partial', text: accumulatedText })
}
}
vad.pop();
}

View File

@@ -14,7 +14,7 @@
}
.voice-transcribe-dialog {
background: var(--color-bg-elevated);
background: var(--bg-secondary);
border-radius: 16px;
box-shadow: 0 8px 32px rgba(0, 0, 0, 0.3);
width: 90%;
@@ -28,13 +28,13 @@
align-items: center;
justify-content: space-between;
padding: 20px 24px;
border-bottom: 1px solid var(--color-border);
border-bottom: 1px solid var(--border-color);
h3 {
margin: 0;
font-size: 18px;
font-weight: 600;
color: var(--color-text-primary);
color: var(--text-primary);
}
.close-button {
@@ -42,13 +42,13 @@
border: none;
cursor: pointer;
padding: 4px;
color: var(--color-text-secondary);
color: var(--text-secondary);
border-radius: 6px;
transition: all 0.15s ease;
&:hover {
background: var(--color-bg-hover);
color: var(--color-text-primary);
background: var(--bg-hover);
color: var(--text-primary);
}
}
}
@@ -65,19 +65,19 @@
gap: 16px;
.info-icon {
color: var(--color-primary);
color: var(--primary);
opacity: 0.8;
}
.info-text {
font-size: 15px;
color: var(--color-text-primary);
color: var(--text-primary);
margin: 0;
}
.model-info {
width: 100%;
background: var(--color-bg);
background: var(--bg-tertiary);
border-radius: 12px;
padding: 16px;
display: flex;
@@ -91,11 +91,11 @@
font-size: 14px;
.label {
color: var(--color-text-secondary);
color: var(--text-secondary);
}
.value {
color: var(--color-text-primary);
color: var(--text-primary);
font-weight: 500;
}
}
@@ -111,27 +111,27 @@
.download-icon {
.downloading-icon {
color: var(--color-primary);
color: var(--primary);
animation: bounce 1s ease-in-out infinite;
}
}
.download-text {
font-size: 15px;
color: var(--color-text-primary);
color: var(--text-primary);
margin: 0;
}
.progress-bar {
width: 100%;
height: 6px;
background: var(--color-bg);
background: var(--bg-tertiary);
border-radius: 3px;
overflow: hidden;
.progress-fill {
height: 100%;
background: linear-gradient(90deg, var(--color-primary), var(--color-accent));
background: var(--primary-gradient);
border-radius: 3px;
transition: width 0.3s ease;
}
@@ -139,10 +139,17 @@
.progress-text {
font-size: 14px;
color: var(--color-text-secondary);
color: var(--text-secondary);
margin: 0;
font-variant-numeric: tabular-nums;
}
.download-hint {
font-size: 12px;
color: var(--text-tertiary);
margin: 8px 0 0 0;
text-align: center;
}
}
.complete-section {
@@ -153,12 +160,12 @@
padding: 20px 0;
.complete-icon {
color: var(--color-success);
color: #10b981;
}
.complete-text {
font-size: 15px;
color: var(--color-text-primary);
color: var(--text-primary);
margin: 0;
}
}
@@ -196,16 +203,16 @@
gap: 6px;
&.btn-secondary {
background: var(--color-bg);
color: var(--color-text-primary);
background: var(--bg-tertiary);
color: var(--text-primary);
&:hover {
background: var(--color-bg-hover);
background: var(--bg-hover);
}
}
&.btn-primary {
background: var(--color-primary);
background: var(--primary);
color: white;
&:hover {

View File

@@ -18,7 +18,12 @@ export const VoiceTranscribeDialog: React.FC<VoiceTranscribeDialogProps> = ({
useEffect(() => {
// 监听下载进度
const removeListener = window.electronAPI.whisper?.onDownloadProgress?.((payload) => {
if (!window.electronAPI?.whisper?.onDownloadProgress) {
console.warn('[VoiceTranscribeDialog] whisper API 不可用')
return
}
const removeListener = window.electronAPI.whisper.onDownloadProgress((payload) => {
if (payload.percent !== undefined) {
setDownloadProgress(payload.percent)
}
@@ -30,12 +35,17 @@ export const VoiceTranscribeDialog: React.FC<VoiceTranscribeDialogProps> = ({
}, [])
const handleDownload = async () => {
if (!window.electronAPI?.whisper?.downloadModel) {
setDownloadError('语音转文字功能不可用')
return
}
setIsDownloading(true)
setDownloadError(null)
setDownloadProgress(0)
try {
const result = await window.electronAPI.whisper?.downloadModel()
const result = await window.electronAPI.whisper.downloadModel()
if (result?.success) {
setIsComplete(true)
@@ -56,7 +66,7 @@ export const VoiceTranscribeDialog: React.FC<VoiceTranscribeDialogProps> = ({
}
const handleCancel = () => {
if (!isDownloading) {
if (!isDownloading && !isComplete) {
onClose()
}
}
@@ -66,7 +76,7 @@ export const VoiceTranscribeDialog: React.FC<VoiceTranscribeDialogProps> = ({
<div className="voice-transcribe-dialog" onClick={(e) => e.stopPropagation()}>
<div className="dialog-header">
<h3></h3>
{!isDownloading && (
{!isDownloading && !isComplete && (
<button className="close-button" onClick={onClose}>
<X size={20} />
</button>
@@ -121,7 +131,9 @@ export const VoiceTranscribeDialog: React.FC<VoiceTranscribeDialogProps> = ({
<div className="download-icon">
<Download size={48} className="downloading-icon" />
</div>
<p className="download-text">...</p>
<p className="download-text">
{downloadProgress < 1 ? '正在连接服务器...' : '正在下载模型...'}
</p>
<div className="progress-bar">
<div
className="progress-fill"
@@ -129,6 +141,9 @@ export const VoiceTranscribeDialog: React.FC<VoiceTranscribeDialogProps> = ({
/>
</div>
<p className="progress-text">{downloadProgress.toFixed(1)}%</p>
{downloadProgress < 1 && (
<p className="download-hint"></p>
)}
</div>
)}

View File

@@ -987,6 +987,11 @@ function ChatPage(_props: ChatPageProps) {
})
}
// Record which voice message requested transcription, then open the
// model-download dialog. Empty dependency array keeps the callback identity
// stable across renders so memoized children don't re-render needlessly.
const handleRequireModelDownload = useCallback((sessionId: string, messageId: string) => {
  setPendingVoiceTranscriptRequest({ sessionId, messageId })
  setShowVoiceTranscribeDialog(true)
}, [])
return (
<div className={`chat-page ${isResizing ? 'resizing' : ''}`}>
{/* 左侧会话列表 */}
@@ -1166,6 +1171,7 @@ function ChatPage(_props: ChatPageProps) {
showTime={!showDateDivider && showTime}
myAvatarUrl={myAvatarUrl}
isGroupChat={isGroupChat(currentSession.username)}
onRequireModelDownload={handleRequireModelDownload}
/>
</div>
)
@@ -1298,20 +1304,16 @@ function ChatPage(_props: ChatPageProps) {
}}
onDownloadComplete={async () => {
setShowVoiceTranscribeDialog(false)
// 下载完成后,继续转写
// 下载完成后,触发页面刷新让组件重新尝试转写
// 通过更新缓存触发组件重新检查
if (pendingVoiceTranscriptRequest) {
try {
const result = await window.electronAPI.chat.getVoiceTranscript(
pendingVoiceTranscriptRequest.sessionId,
pendingVoiceTranscriptRequest.messageId
)
if (result.success) {
const cacheKey = `voice-transcript:${pendingVoiceTranscriptRequest.messageId}`
voiceTranscriptCache.set(cacheKey, (result.transcript || '').trim())
}
} catch (error) {
console.error('[ChatPage] 语音转文字失败:', error)
}
// 清除缓存中的请求标记,让组件可以重新尝试
const cacheKey = `voice-transcript:${pendingVoiceTranscriptRequest.messageId}`
// 不直接调用转写,而是让组件自己重试
// 通过触发一个自定义事件来通知所有 MessageBubble 组件
window.dispatchEvent(new CustomEvent('model-downloaded', {
detail: { messageId: pendingVoiceTranscriptRequest.messageId }
}))
}
setPendingVoiceTranscriptRequest(null)
}}
@@ -1330,12 +1332,13 @@ const senderAvatarCache = new Map<string, { avatarUrl?: string; displayName?: st
const senderAvatarLoading = new Map<string, Promise<{ avatarUrl?: string; displayName?: string } | null>>()
// 消息气泡组件
function MessageBubble({ message, session, showTime, myAvatarUrl, isGroupChat }: {
function MessageBubble({ message, session, showTime, myAvatarUrl, isGroupChat, onRequireModelDownload }: {
message: Message;
session: ChatSession;
showTime?: boolean;
myAvatarUrl?: string;
isGroupChat?: boolean;
onRequireModelDownload?: (sessionId: string, messageId: string) => void;
}) {
const isSystem = isSystemMessage(message.localType)
const isEmoji = message.localType === 47
@@ -1682,21 +1685,27 @@ function MessageBubble({ message, session, showTime, myAvatarUrl, isGroupChat }:
const requestVoiceTranscript = useCallback(async () => {
if (voiceTranscriptLoading || voiceTranscriptRequestedRef.current) return
// 检查模型状态
const modelStatus = await window.electronAPI.whisper?.getModelStatus()
if (!modelStatus?.exists) {
// 模型未下载,抛出错误让外层处理
const error: any = new Error('MODEL_NOT_DOWNLOADED')
error.requiresDownload = true
error.sessionId = session.username
error.messageId = String(message.localId)
throw error
// 检查 whisper API 是否可用
if (!window.electronAPI?.whisper?.getModelStatus) {
console.warn('[ChatPage] whisper API 不可用')
setVoiceTranscriptError(true)
return
}
voiceTranscriptRequestedRef.current = true
setVoiceTranscriptLoading(true)
setVoiceTranscriptError(false)
try {
// 检查模型状态
const modelStatus = await window.electronAPI.whisper.getModelStatus()
if (!modelStatus?.exists) {
const error: any = new Error('MODEL_NOT_DOWNLOADED')
error.requiresDownload = true
error.sessionId = session.username
error.messageId = String(message.localId)
throw error
}
const result = await window.electronAPI.chat.getVoiceTranscript(session.username, String(message.localId))
if (result.success) {
const transcriptText = (result.transcript || '').trim()
@@ -1709,8 +1718,10 @@ function MessageBubble({ message, session, showTime, myAvatarUrl, isGroupChat }:
} catch (error: any) {
// 检查是否是模型未下载错误
if (error?.requiresDownload) {
// 不显示错误状态,等待用户手动点击转文字按钮时会触发下载弹窗
voiceTranscriptRequestedRef.current = false
// 模型未下载,触发下载弹窗
onRequireModelDownload?.(error.sessionId, error.messageId)
// 不要重置 voiceTranscriptRequestedRef避免重复触发
setVoiceTranscriptLoading(false)
return
}
setVoiceTranscriptError(true)
@@ -1718,7 +1729,27 @@ function MessageBubble({ message, session, showTime, myAvatarUrl, isGroupChat }:
} finally {
setVoiceTranscriptLoading(false)
}
}, [message.localId, session.username, voiceTranscriptCacheKey, voiceTranscriptLoading])
}, [message.localId, session.username, voiceTranscriptCacheKey, voiceTranscriptLoading, onRequireModelDownload])
// 监听模型下载完成事件
useEffect(() => {
if (!isVoice) return
const handleModelDownloaded = (event: CustomEvent) => {
if (event.detail?.messageId === String(message.localId)) {
// 重置状态,允许重新尝试转写
voiceTranscriptRequestedRef.current = false
setVoiceTranscriptError(false)
// 立即尝试转写
void requestVoiceTranscript()
}
}
window.addEventListener('model-downloaded', handleModelDownloaded as EventListener)
return () => {
window.removeEventListener('model-downloaded', handleModelDownloaded as EventListener)
}
}, [isVoice, message.localId, requestVoiceTranscript])
// 根据设置决定是否自动转写
const [autoTranscribeEnabled, setAutoTranscribeEnabled] = useState(false)

View File

@@ -352,6 +352,37 @@
color: var(--text-secondary);
}
.language-checkboxes {
display: flex;
flex-wrap: wrap;
gap: 16px;
margin-top: 8px;
}
.language-checkbox {
display: flex;
align-items: center;
gap: 8px;
cursor: pointer;
user-select: none;
input[type="checkbox"] {
width: 18px;
height: 18px;
cursor: pointer;
accent-color: var(--primary);
}
.checkbox-label {
font-size: 14px;
color: var(--text-primary);
}
&:hover .checkbox-label {
color: var(--primary);
}
}
.switch {
position: relative;
width: 46px;

View File

@@ -48,6 +48,7 @@ function SettingsPage() {
const [whisperDownloadProgress, setWhisperDownloadProgress] = useState(0)
const [whisperModelStatus, setWhisperModelStatus] = useState<{ exists: boolean; modelPath?: string; tokensPath?: string } | null>(null)
const [autoTranscribeVoice, setAutoTranscribeVoice] = useState(false)
const [transcribeLanguages, setTranscribeLanguages] = useState<string[]>(['zh'])
const [isLoading, setIsLoadingState] = useState(false)
const [isTesting, setIsTesting] = useState(false)
@@ -112,6 +113,7 @@ function SettingsPage() {
const savedWhisperModelName = await configService.getWhisperModelName()
const savedWhisperModelDir = await configService.getWhisperModelDir()
const savedAutoTranscribe = await configService.getAutoTranscribeVoice()
const savedTranscribeLanguages = await configService.getTranscribeLanguages()
if (savedKey) setDecryptKey(savedKey)
if (savedPath) setDbPath(savedPath)
@@ -123,6 +125,15 @@ function SettingsPage() {
if (savedImageAesKey) setImageAesKey(savedImageAesKey)
setLogEnabled(savedLogEnabled)
setAutoTranscribeVoice(savedAutoTranscribe)
setTranscribeLanguages(savedTranscribeLanguages)
// 如果语言列表为空,保存默认值
if (!savedTranscribeLanguages || savedTranscribeLanguages.length === 0) {
const defaultLanguages = ['zh']
setTranscribeLanguages(defaultLanguages)
await configService.setTranscribeLanguages(defaultLanguages)
}
if (savedWhisperModelDir) setWhisperModelDir(savedWhisperModelDir)
} catch (e) {
console.error('加载配置失败:', e)
@@ -454,6 +465,7 @@ function SettingsPage() {
}
await configService.setWhisperModelDir(whisperModelDir)
await configService.setAutoTranscribeVoice(autoTranscribeVoice)
await configService.setTranscribeLanguages(transcribeLanguages)
await configService.setOnboardingDone(true)
showMessage('配置保存成功,正在测试连接...', true)
@@ -490,6 +502,7 @@ function SettingsPage() {
setCachePath('')
setLogEnabled(false)
setAutoTranscribeVoice(false)
setTranscribeLanguages(['zh'])
setWhisperModelDir('')
setWhisperModelStatus(null)
setWhisperDownloadProgress(0)
@@ -757,6 +770,46 @@ function SettingsPage() {
</label>
</div>
</div>
<div className="form-group">
<label></label>
<span className="form-hint"></span>
<div className="language-checkboxes">
{[
{ code: 'zh', name: '中文' },
{ code: 'en', name: '英文' },
{ code: 'ja', name: '日文' },
{ code: 'ko', name: '韩文' }
].map((lang) => (
<label key={lang.code} className="language-checkbox">
<input
type="checkbox"
checked={transcribeLanguages.includes(lang.code)}
onChange={async (e) => {
const checked = e.target.checked
let newLanguages: string[]
if (checked) {
// 添加语言
newLanguages = [...transcribeLanguages, lang.code]
} else {
// 移除语言,但至少保留一个
if (transcribeLanguages.length <= 1) {
showMessage('至少需要选择一种语言', false)
return
}
newLanguages = transcribeLanguages.filter(l => l !== lang.code)
}
setTranscribeLanguages(newLanguages)
await configService.setTranscribeLanguages(newLanguages)
showMessage(`${checked ? '添加' : '移除'}${lang.name}`, true)
}}
/>
<span className="checkbox-label">{lang.name}</span>
</label>
))}
</div>
</div>
<div className="form-group whisper-section">
<label> (SenseVoiceSmall)</label>
<span className="form-hint"> Sherpa-onnx</span>

View File

@@ -21,7 +21,8 @@ export const CONFIG_KEYS = {
WHISPER_MODEL_NAME: 'whisperModelName',
WHISPER_MODEL_DIR: 'whisperModelDir',
WHISPER_DOWNLOAD_SOURCE: 'whisperDownloadSource',
AUTO_TRANSCRIBE_VOICE: 'autoTranscribeVoice'
AUTO_TRANSCRIBE_VOICE: 'autoTranscribeVoice',
TRANSCRIBE_LANGUAGES: 'transcribeLanguages'
} as const
// 获取解密密钥
@@ -230,3 +231,15 @@ export async function getAutoTranscribeVoice(): Promise<boolean> {
// Persist the auto-transcribe-voice toggle under CONFIG_KEYS.AUTO_TRANSCRIBE_VOICE.
export async function setAutoTranscribeVoice(enabled: boolean): Promise<void> {
  await config.set(CONFIG_KEYS.AUTO_TRANSCRIBE_VOICE, enabled)
}
/**
 * Get the list of languages allowed for voice transcription.
 *
 * Falls back to Chinese-only (['zh']) when the stored value is missing,
 * not an array, or an empty array. The original `(value as string[]) || ['zh']`
 * let an empty array through (empty arrays are truthy), which contradicted the
 * empty-means-default handling used by the transcribe service and worker.
 */
export async function getTranscribeLanguages(): Promise<string[]> {
  const value = await config.get(CONFIG_KEYS.TRANSCRIBE_LANGUAGES)
  // Validate the persisted value instead of blindly asserting its type.
  if (Array.isArray(value) && value.length > 0) {
    return value as string[]
  }
  return ['zh']
}
// Persist the allowed transcription language list (e.g. ['zh', 'en']) under
// CONFIG_KEYS.TRANSCRIBE_LANGUAGES. Callers are expected to pass a non-empty
// list; the getter defaults to ['zh'] otherwise.
export async function setTranscribeLanguages(languages: string[]): Promise<void> {
  await config.set(CONFIG_KEYS.TRANSCRIBE_LANGUAGES, languages)
}