mirror of
https://github.com/hicccc77/WeFlow.git
synced 2026-04-11 23:15:51 +00:00
feat: ai功能的初次提交
This commit is contained in:
450
electron/services/aiAgentService.ts
Normal file
450
electron/services/aiAgentService.ts
Normal file
@@ -0,0 +1,450 @@
|
||||
import http from 'http'
|
||||
import https from 'https'
|
||||
import { randomUUID } from 'crypto'
|
||||
import { URL } from 'url'
|
||||
import { ConfigService } from './config'
|
||||
import { aiAnalysisService, type AiAnalysisRunEvent } from './aiAnalysisService'
|
||||
|
||||
/** Token accounting as reported by the model provider; all fields optional. */
export interface TokenUsage {
  promptTokens?: number
  completionTokens?: number
  totalTokens?: number
}
|
||||
|
||||
/** Coarse lifecycle snapshot of an agent run, delivered via 'status' chunks. */
export interface AgentRuntimeStatus {
  phase: 'idle' | 'thinking' | 'tool_running' | 'responding' | 'completed' | 'error' | 'aborted'
  // Current LLM round number, when known.
  round?: number
  currentTool?: string
  toolsUsed?: number
  // Epoch milliseconds at which this snapshot was taken.
  updatedAt: number
  totalUsage?: TokenUsage
}
|
||||
|
||||
/**
 * One streamed event of an agent run. `type` indicates which of the optional
 * fields are populated (content/think text, tool info, status, usage, error).
 */
export interface AgentStreamChunk {
  runId: string
  conversationId?: string
  type: 'content' | 'think' | 'tool_start' | 'tool_result' | 'status' | 'done' | 'error'
  content?: string
  thinkTag?: string
  thinkDurationMs?: number
  toolName?: string
  toolParams?: Record<string, unknown>
  toolResult?: unknown
  error?: string
  // Set on terminal chunks ('done' / 'error').
  isFinished?: boolean
  usage?: TokenUsage
  status?: AgentRuntimeStatus
}
|
||||
|
||||
/** Input for starting an agent run; `mode` defaults to 'chat' when omitted. */
export interface AgentRunPayload {
  mode?: 'chat' | 'sql'
  conversationId?: string
  userInput: string
  assistantId?: string
  activeSkillId?: string
  chatScope?: 'group' | 'private'
  // Extra context consumed only by 'sql' mode.
  sqlContext?: {
    schemaText?: string
    targetHint?: string
  }
}
|
||||
|
||||
/** Internal bookkeeping for an in-flight run. */
interface ActiveAgentRun {
  runId: string
  mode: 'chat' | 'sql'
  conversationId?: string
  // runId of the delegated aiAnalysisService run (chat mode); used by abort().
  innerRunId?: string
  aborted: boolean
}
|
||||
|
||||
function normalizeText(value: unknown, fallback = ''): string {
|
||||
const text = String(value ?? '').trim()
|
||||
return text || fallback
|
||||
}
|
||||
|
||||
function buildApiUrl(baseUrl: string, path: string): string {
|
||||
const base = baseUrl.replace(/\/+$/, '')
|
||||
const suffix = path.startsWith('/') ? path : `/${path}`
|
||||
return `${base}${suffix}`
|
||||
}
|
||||
|
||||
function extractSqlText(raw: string): string {
|
||||
const text = normalizeText(raw)
|
||||
if (!text) return ''
|
||||
const fenced = text.match(/```(?:sql)?\s*([\s\S]*?)```/i)
|
||||
if (fenced?.[1]) return fenced[1].trim()
|
||||
return text
|
||||
}
|
||||
|
||||
class AiAgentService {
|
||||
private readonly config = ConfigService.getInstance()
|
||||
private readonly runs = new Map<string, ActiveAgentRun>()
|
||||
|
||||
private getSharedModelConfig(): { apiBaseUrl: string; apiKey: string; model: string } {
|
||||
return {
|
||||
apiBaseUrl: normalizeText(this.config.get('aiModelApiBaseUrl')),
|
||||
apiKey: normalizeText(this.config.get('aiModelApiKey')),
|
||||
model: normalizeText(this.config.get('aiModelApiModel'), 'gpt-4o-mini')
|
||||
}
|
||||
}
|
||||
|
||||
private emitStatus(
|
||||
run: ActiveAgentRun,
|
||||
onChunk: (chunk: AgentStreamChunk) => void,
|
||||
phase: AgentRuntimeStatus['phase'],
|
||||
extra?: Partial<AgentRuntimeStatus>
|
||||
): void {
|
||||
onChunk({
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'status',
|
||||
status: {
|
||||
phase,
|
||||
updatedAt: Date.now(),
|
||||
...extra
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
private mapRunEventToChunk(
|
||||
run: ActiveAgentRun,
|
||||
event: AiAnalysisRunEvent
|
||||
): AgentStreamChunk | null {
|
||||
run.innerRunId = event.runId
|
||||
run.conversationId = event.conversationId || run.conversationId
|
||||
if (event.stage === 'llm_round_started') {
|
||||
return {
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'think',
|
||||
content: event.message,
|
||||
thinkTag: 'round'
|
||||
}
|
||||
}
|
||||
if (event.stage === 'tool_start') {
|
||||
return {
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'tool_start',
|
||||
toolName: event.toolName,
|
||||
toolParams: (event.data || {}) as Record<string, unknown>
|
||||
}
|
||||
}
|
||||
if (event.stage === 'tool_done' || event.stage === 'tool_error') {
|
||||
return {
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'tool_result',
|
||||
toolName: event.toolName,
|
||||
toolResult: event.data || { status: event.status, durationMs: event.durationMs }
|
||||
}
|
||||
}
|
||||
if (event.stage === 'completed') {
|
||||
return {
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'status',
|
||||
status: { phase: 'completed', updatedAt: Date.now() }
|
||||
}
|
||||
}
|
||||
if (event.stage === 'aborted') {
|
||||
return {
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'status',
|
||||
status: { phase: 'aborted', updatedAt: Date.now() }
|
||||
}
|
||||
}
|
||||
if (event.stage === 'error') {
|
||||
return {
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'status',
|
||||
status: { phase: 'error', updatedAt: Date.now() }
|
||||
}
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
private async callModel(payload: any, apiBaseUrl: string, apiKey: string): Promise<any> {
|
||||
const endpoint = buildApiUrl(apiBaseUrl, '/chat/completions')
|
||||
const body = JSON.stringify(payload)
|
||||
const urlObj = new URL(endpoint)
|
||||
return new Promise((resolve, reject) => {
|
||||
const requestFn = urlObj.protocol === 'https:' ? https.request : http.request
|
||||
const req = requestFn({
|
||||
hostname: urlObj.hostname,
|
||||
port: urlObj.port || (urlObj.protocol === 'https:' ? 443 : 80),
|
||||
path: urlObj.pathname + urlObj.search,
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Content-Length': Buffer.byteLength(body).toString(),
|
||||
Authorization: `Bearer ${apiKey}`
|
||||
}
|
||||
}, (res) => {
|
||||
let data = ''
|
||||
res.on('data', (chunk) => { data += String(chunk) })
|
||||
res.on('end', () => {
|
||||
try {
|
||||
resolve(JSON.parse(data || '{}'))
|
||||
} catch (error) {
|
||||
reject(new Error(`AI 响应解析失败: ${String(error)}`))
|
||||
}
|
||||
})
|
||||
})
|
||||
req.setTimeout(45_000, () => {
|
||||
req.destroy()
|
||||
reject(new Error('AI 请求超时'))
|
||||
})
|
||||
req.on('error', reject)
|
||||
req.write(body)
|
||||
req.end()
|
||||
})
|
||||
}
|
||||
|
||||
async runStream(
|
||||
payload: AgentRunPayload,
|
||||
runtime: {
|
||||
onChunk: (chunk: AgentStreamChunk) => void
|
||||
onFinished?: (result: { success: boolean; runId: string; conversationId?: string; error?: string }) => void
|
||||
}
|
||||
): Promise<{ success: boolean; runId: string }> {
|
||||
const runId = randomUUID()
|
||||
const mode = payload.mode === 'sql' ? 'sql' : 'chat'
|
||||
const run: ActiveAgentRun = {
|
||||
runId,
|
||||
mode,
|
||||
conversationId: normalizeText(payload.conversationId) || undefined,
|
||||
aborted: false
|
||||
}
|
||||
this.runs.set(runId, run)
|
||||
|
||||
this.execute(run, payload, runtime).catch((error) => {
|
||||
runtime.onChunk({
|
||||
runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'error',
|
||||
error: String((error as Error)?.message || error),
|
||||
isFinished: true
|
||||
})
|
||||
runtime.onFinished?.({
|
||||
success: false,
|
||||
runId,
|
||||
conversationId: run.conversationId,
|
||||
error: String((error as Error)?.message || error)
|
||||
})
|
||||
this.runs.delete(runId)
|
||||
})
|
||||
|
||||
return { success: true, runId }
|
||||
}
|
||||
|
||||
private async execute(
|
||||
run: ActiveAgentRun,
|
||||
payload: AgentRunPayload,
|
||||
runtime: {
|
||||
onChunk: (chunk: AgentStreamChunk) => void
|
||||
onFinished?: (result: { success: boolean; runId: string; conversationId?: string; error?: string }) => void
|
||||
}
|
||||
): Promise<void> {
|
||||
if (run.mode === 'sql') {
|
||||
await this.executeSqlMode(run, payload, runtime)
|
||||
return
|
||||
}
|
||||
this.emitStatus(run, runtime.onChunk, 'thinking')
|
||||
const result = await aiAnalysisService.sendMessage(
|
||||
normalizeText(payload.conversationId),
|
||||
normalizeText(payload.userInput),
|
||||
{
|
||||
assistantId: normalizeText(payload.assistantId),
|
||||
activeSkillId: normalizeText(payload.activeSkillId),
|
||||
chatScope: payload.chatScope === 'group' ? 'group' : 'private'
|
||||
},
|
||||
{
|
||||
onRunEvent: (event) => {
|
||||
const mapped = this.mapRunEventToChunk(run, event)
|
||||
if (mapped) runtime.onChunk(mapped)
|
||||
}
|
||||
}
|
||||
)
|
||||
if (run.aborted) {
|
||||
runtime.onChunk({
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'error',
|
||||
error: '任务已取消',
|
||||
isFinished: true
|
||||
})
|
||||
runtime.onFinished?.({
|
||||
success: false,
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
error: '任务已取消'
|
||||
})
|
||||
this.runs.delete(run.runId)
|
||||
return
|
||||
}
|
||||
if (!result.success || !result.result) {
|
||||
runtime.onChunk({
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'error',
|
||||
error: result.error || '执行失败',
|
||||
isFinished: true
|
||||
})
|
||||
runtime.onFinished?.({
|
||||
success: false,
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
error: result.error || '执行失败'
|
||||
})
|
||||
this.runs.delete(run.runId)
|
||||
return
|
||||
}
|
||||
|
||||
run.conversationId = result.result.conversationId || run.conversationId
|
||||
runtime.onChunk({
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'content',
|
||||
content: result.result.assistantText
|
||||
})
|
||||
runtime.onChunk({
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'done',
|
||||
usage: result.result.usage,
|
||||
isFinished: true
|
||||
})
|
||||
runtime.onFinished?.({ success: true, runId: run.runId, conversationId: run.conversationId })
|
||||
this.runs.delete(run.runId)
|
||||
}
|
||||
|
||||
private async executeSqlMode(
|
||||
run: ActiveAgentRun,
|
||||
payload: AgentRunPayload,
|
||||
runtime: {
|
||||
onChunk: (chunk: AgentStreamChunk) => void
|
||||
onFinished?: (result: { success: boolean; runId: string; conversationId?: string; error?: string }) => void
|
||||
}
|
||||
): Promise<void> {
|
||||
const { apiBaseUrl, apiKey, model } = this.getSharedModelConfig()
|
||||
if (!apiBaseUrl || !apiKey) {
|
||||
runtime.onChunk({
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'error',
|
||||
error: '请先在设置 > AI 通用中配置模型',
|
||||
isFinished: true
|
||||
})
|
||||
runtime.onFinished?.({ success: false, runId: run.runId, conversationId: run.conversationId, error: '模型未配置' })
|
||||
this.runs.delete(run.runId)
|
||||
return
|
||||
}
|
||||
this.emitStatus(run, runtime.onChunk, 'thinking')
|
||||
const schemaText = normalizeText(payload.sqlContext?.schemaText)
|
||||
const targetHint = normalizeText(payload.sqlContext?.targetHint)
|
||||
const systemPrompt = [
|
||||
'你是 WeFlow SQL Lab 助手。',
|
||||
'只输出一段只读 SQL。',
|
||||
'禁止输出解释、Markdown、注释、DML、DDL。'
|
||||
].join('\n')
|
||||
const userPrompt = [
|
||||
targetHint ? `目标数据源: ${targetHint}` : '',
|
||||
schemaText ? `可用 Schema:\n${schemaText}` : '',
|
||||
`需求: ${normalizeText(payload.userInput)}`
|
||||
].filter(Boolean).join('\n\n')
|
||||
|
||||
const res = await this.callModel({
|
||||
model,
|
||||
messages: [
|
||||
{ role: 'system', content: systemPrompt },
|
||||
{ role: 'user', content: userPrompt }
|
||||
],
|
||||
temperature: 0.1,
|
||||
stream: false
|
||||
}, apiBaseUrl, apiKey)
|
||||
|
||||
if (run.aborted) {
|
||||
runtime.onChunk({
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'error',
|
||||
error: '任务已取消',
|
||||
isFinished: true
|
||||
})
|
||||
runtime.onFinished?.({ success: false, runId: run.runId, conversationId: run.conversationId, error: '任务已取消' })
|
||||
this.runs.delete(run.runId)
|
||||
return
|
||||
}
|
||||
|
||||
const rawContent = normalizeText(res?.choices?.[0]?.message?.content)
|
||||
const sql = extractSqlText(rawContent)
|
||||
const usage: TokenUsage = {
|
||||
promptTokens: Number(res?.usage?.prompt_tokens || 0),
|
||||
completionTokens: Number(res?.usage?.completion_tokens || 0),
|
||||
totalTokens: Number(res?.usage?.total_tokens || 0)
|
||||
}
|
||||
if (!sql) {
|
||||
runtime.onChunk({
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'error',
|
||||
error: 'SQL 生成失败',
|
||||
isFinished: true
|
||||
})
|
||||
runtime.onFinished?.({ success: false, runId: run.runId, conversationId: run.conversationId, error: 'SQL 生成失败' })
|
||||
this.runs.delete(run.runId)
|
||||
return
|
||||
}
|
||||
for (let i = 0; i < sql.length; i += 36) {
|
||||
if (run.aborted) break
|
||||
runtime.onChunk({
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'content',
|
||||
content: sql.slice(i, i + 36)
|
||||
})
|
||||
}
|
||||
runtime.onChunk({
|
||||
runId: run.runId,
|
||||
conversationId: run.conversationId,
|
||||
type: 'done',
|
||||
usage,
|
||||
isFinished: true
|
||||
})
|
||||
runtime.onFinished?.({ success: true, runId: run.runId, conversationId: run.conversationId })
|
||||
this.runs.delete(run.runId)
|
||||
}
|
||||
|
||||
async abort(payload: { runId?: string; conversationId?: string }): Promise<{ success: boolean }> {
|
||||
const runId = normalizeText(payload.runId)
|
||||
const conversationId = normalizeText(payload.conversationId)
|
||||
if (runId) {
|
||||
const run = this.runs.get(runId)
|
||||
if (run) {
|
||||
run.aborted = true
|
||||
if (run.mode === 'chat') {
|
||||
await aiAnalysisService.abortRun({ runId: run.innerRunId, conversationId: run.conversationId })
|
||||
}
|
||||
}
|
||||
return { success: true }
|
||||
}
|
||||
|
||||
if (conversationId) {
|
||||
for (const run of this.runs.values()) {
|
||||
if (run.conversationId !== conversationId) continue
|
||||
run.aborted = true
|
||||
if (run.mode === 'chat') {
|
||||
await aiAnalysisService.abortRun({ runId: run.innerRunId, conversationId: run.conversationId })
|
||||
}
|
||||
}
|
||||
return { success: true }
|
||||
}
|
||||
return { success: true }
|
||||
}
|
||||
}
|
||||
|
||||
export const aiAgentService = new AiAgentService()
|
||||
|
||||
2673
electron/services/aiAnalysisService.ts
Normal file
2673
electron/services/aiAnalysisService.ts
Normal file
File diff suppressed because it is too large
Load Diff
30
electron/services/aiAnalysisSkills/base.md
Normal file
30
electron/services/aiAnalysisSkills/base.md
Normal file
@@ -0,0 +1,30 @@
|
||||
你是 WeFlow 的 AI 分析助手。
|
||||
|
||||
目标:
|
||||
- 精准完成用户在聊天数据上的查询、总结、分析、回忆任务。
|
||||
- 优先使用本地工具获取证据,禁止猜测或捏造。
|
||||
- 默认输出简洁中文,先给结论,再给关键依据。
|
||||
|
||||
工作原则:
|
||||
- Token 节约优先:默认只请求必要字段,只有用户明确需要或证据不足时再升级 detailLevel。
|
||||
- 先范围后细节:优先定位会话/时间范围,再拉取具体时间轴或消息。
|
||||
- 可解释性:最终结论尽量附带来源范围与统计口径。
|
||||
- 语音消息不能臆测:必须先拿语音 ID,再点名转写,再总结。
|
||||
- 联系人排行题(“谁聊得最多/最常联系”)命中 ai_query_top_contacts 后,必须直接给出“前N名+消息数”。
|
||||
- 除非用户明确要求,联系人排行默认不包含群聊和公众号。
|
||||
- 用户提到“最近/近期/lately/recent”但未给时间窗时,默认按近30天口径统计并写明口径。
|
||||
- 用户提到联系人简称(如“lr”)时,先把它当联系人缩写处理,优先命中个人会话,不要默认落到群聊。
|
||||
- 用户问“我和X聊了什么”时必须交付“主题总结”,不要贴原始逐条聊天流水。
|
||||
|
||||
Agent执行要求:
|
||||
- 用户输入直接进入推理,本地不做关键词分流,你自主决定工具计划。
|
||||
- 当用户说“今天凌晨/昨晚/某段时间的聊天”,优先调用 ai_query_time_window_activity。
|
||||
- 拿到活跃会话后,调用 ai_query_session_glimpse 对多个会话逐个抽样阅读,不要只读一个会话就停止。
|
||||
- 如果初步探索后用户目标仍模糊,主动提出 1 个关键澄清问题继续多轮对话。
|
||||
- 仅当你确认任务完成时,输出结束标记 `[[WF_DONE]]`,并紧跟 `<final_answer>...</final_answer>`。
|
||||
- 若还未完成,不要输出结束标记,继续调用工具。
|
||||
|
||||
语音处理硬规则:
|
||||
- 当用户涉及“语音内容”时,先调用 ai_list_voice_messages。
|
||||
- 让系统返回候选 ID 后,再调用 ai_transcribe_voice_messages 指定 ID。
|
||||
- 未转写成功的语音不可作为事实依据。
|
||||
@@ -0,0 +1,6 @@
|
||||
你会收到 conversation_summary(历史压缩摘要)。
|
||||
|
||||
使用方式:
|
||||
- 默认把摘要作为历史背景,不逐字复述。
|
||||
- 若摘要与最近消息冲突,以最近消息为准。
|
||||
- 若用户追问很久之前的细节,优先重新调用工具检索,不依赖旧记忆。
|
||||
@@ -0,0 +1,8 @@
|
||||
工具:ai_fetch_message_briefs
|
||||
|
||||
何时用:
|
||||
- 需要核对少量关键消息原文,避免全量展开。
|
||||
|
||||
调用建议:
|
||||
- 只传必要 items(sessionId + localId),每次少量(<=20)。
|
||||
- 默认 minimal;需要上下文再用 standard/full。
|
||||
@@ -0,0 +1,9 @@
|
||||
工具:ai_query_session_candidates
|
||||
|
||||
何时用:
|
||||
- 用户未明确具体会话,但给了关键词/关系词(如“老婆”“买车”)。
|
||||
|
||||
调用建议:
|
||||
- 首次调用 detailLevel=minimal。
|
||||
- 默认 limit 8~12,避免拉太多候选。
|
||||
- 当候选歧义较大时再升级 detailLevel=standard/full。
|
||||
@@ -0,0 +1,9 @@
|
||||
工具:ai_query_session_glimpse
|
||||
|
||||
何时用:
|
||||
- 已确定候选会话,需要“先看一点”理解上下文。
|
||||
|
||||
Agent策略:
|
||||
- 每个候选会话先抽样 6~20 条,按时间顺序阅读。
|
||||
- 不要只读一个会话就结束;优先覆盖多会话后再总结。
|
||||
- 如果出现明显分歧场景(工作/家庭/感情)需主动向用户确认分析目标。
|
||||
8
electron/services/aiAnalysisSkills/tool_source_refs.md
Normal file
8
electron/services/aiAnalysisSkills/tool_source_refs.md
Normal file
@@ -0,0 +1,8 @@
|
||||
工具:ai_query_source_refs
|
||||
|
||||
何时用:
|
||||
- 输出总结或分析后,用于来源说明与可解释卡片。
|
||||
|
||||
调用建议:
|
||||
- 默认 minimal 即可,输出 range/session_count/message_count/db_refs。
|
||||
- 只有排错或审计时再请求 full。
|
||||
@@ -0,0 +1,9 @@
|
||||
工具:ai_query_time_window_activity
|
||||
|
||||
何时用:
|
||||
- 用户提到“今天凌晨/昨晚/某个时间段”的聊天分析。
|
||||
|
||||
Agent策略:
|
||||
- 第一步必须先扫时间窗活跃会话,不要直接下结论。
|
||||
- 拿到活跃会话后,再调用 ai_query_session_glimpse 逐个会话抽样阅读。
|
||||
- 若用户目标仍不清晰,先追问 1 个关键澄清问题再继续。
|
||||
9
electron/services/aiAnalysisSkills/tool_timeline.md
Normal file
9
electron/services/aiAnalysisSkills/tool_timeline.md
Normal file
@@ -0,0 +1,9 @@
|
||||
工具:ai_query_timeline
|
||||
|
||||
何时用:
|
||||
- 回忆事件经过、梳理时间线、提取关键节点。
|
||||
|
||||
调用建议:
|
||||
- 默认 detailLevel=minimal。
|
||||
- 先小批次 limit(40~120),不够再分页 offset。
|
||||
- 需要引用原文证据时,可搭配 ai_fetch_message_briefs。
|
||||
9
electron/services/aiAnalysisSkills/tool_top_contacts.md
Normal file
9
electron/services/aiAnalysisSkills/tool_top_contacts.md
Normal file
@@ -0,0 +1,9 @@
|
||||
工具:ai_query_top_contacts
|
||||
|
||||
何时用:
|
||||
- 用户问“谁联系最密切”“谁聊得最多”“最常联系的是谁”。
|
||||
|
||||
调用建议:
|
||||
- 该问题优先调用本工具,而不是先跑时间轴。
|
||||
- 默认 detailLevel=minimal,limit 5~10。
|
||||
- 需要区分群聊时再设置 includeGroups=true。
|
||||
8
electron/services/aiAnalysisSkills/tool_topic_stats.md
Normal file
8
electron/services/aiAnalysisSkills/tool_topic_stats.md
Normal file
@@ -0,0 +1,8 @@
|
||||
工具:ai_query_topic_stats
|
||||
|
||||
何时用:
|
||||
- 用户问“多少、占比、趋势、对比”。
|
||||
|
||||
调用建议:
|
||||
- 仅在统计问题时调用,避免无谓聚合。
|
||||
- 默认 detailLevel=minimal;有统计追问再升到 standard/full。
|
||||
8
electron/services/aiAnalysisSkills/tool_voice_list.md
Normal file
8
electron/services/aiAnalysisSkills/tool_voice_list.md
Normal file
@@ -0,0 +1,8 @@
|
||||
工具:ai_list_voice_messages
|
||||
|
||||
何时用:
|
||||
- 用户提到“语音里说了什么”。
|
||||
|
||||
调用建议:
|
||||
- 第一步先拿 ID 清单,默认 detailLevel=minimal(仅 IDs)。
|
||||
- 如用户需要挑选依据,再用 standard/full 查看更多元数据。
|
||||
@@ -0,0 +1,9 @@
|
||||
工具:ai_transcribe_voice_messages
|
||||
|
||||
何时用:
|
||||
- 已明确拿到语音 ID,且用户需要读取语音内容。
|
||||
|
||||
调用建议:
|
||||
- 必须显式传 ids 或 items。
|
||||
- 单次控制在小批次(建议 <=5),失败可重试。
|
||||
- 转写成功后再参与总结;失败项单独标注,不混入结论。
|
||||
444
electron/services/aiAssistantService.ts
Normal file
444
electron/services/aiAssistantService.ts
Normal file
@@ -0,0 +1,444 @@
|
||||
import { randomUUID } from 'crypto'
|
||||
import { existsSync } from 'fs'
|
||||
import { mkdir, readdir, readFile, rm, writeFile } from 'fs/promises'
|
||||
import { join } from 'path'
|
||||
import { ConfigService } from './config'
|
||||
|
||||
/** Chat scope an assistant can be attached to. */
export type AssistantChatType = 'group' | 'private'
/** Grouping bucket for entries in the builtin tool catalog. */
export type AssistantToolCategory = 'core' | 'analysis'
|
||||
|
||||
/** Persisted assistant profile as surfaced to pickers and editors. */
export interface AssistantSummary {
  id: string
  name: string
  systemPrompt: string
  presetQuestions: string[]
  // Names of builtin tools this assistant is allowed to use.
  allowedBuiltinTools?: string[]
  // Id of the builtin template this assistant derives from, if any.
  builtinId?: string
  applicableChatTypes?: AssistantChatType[]
  supportedLocales?: string[]
}
|
||||
|
||||
/** Full assistant configuration; currently identical in shape to the summary. */
export interface AssistantConfigFull extends AssistantSummary {}
|
||||
|
||||
/** Metadata describing a builtin assistant template and its import state. */
export interface BuiltinAssistantInfo {
  id: string
  name: string
  systemPrompt: string
  applicableChatTypes?: AssistantChatType[]
  supportedLocales?: string[]
  // Whether the template has been materialized into the user's assistant dir.
  imported: boolean
}
|
||||
|
||||
// Builtin "general analysis assistant" template (Chinese), stored as YAML-ish
// frontmatter plus a system-prompt body. The string is runtime data parsed by
// parseAssistantMarkdown — do not reformat or translate its contents.
const GENERAL_CN_MD = `---
id: general_cn
name: 通用分析助手
supportedLocales:
  - zh
presetQuestions:
  - 最近都在聊什么?
  - 谁是最活跃的人?
  - 帮我总结一下最近一周的重要聊天
  - 帮我找一下关于“旅游”的讨论
allowedBuiltinTools:
  - ai_query_time_window_activity
  - ai_query_session_candidates
  - ai_query_session_glimpse
  - ai_query_timeline
  - ai_fetch_message_briefs
  - ai_list_voice_messages
  - ai_transcribe_voice_messages
  - ai_query_topic_stats
  - ai_query_source_refs
  - ai_query_top_contacts
---

你是 WeFlow 的全局聊天分析助手。请使用工具获取证据,给出简洁、准确、可执行的结论。

输出要求:
1. 先结论,再证据。
2. 若证据不足,明确说明不足并建议下一步。
3. 涉及语音内容时,必须先列语音 ID,再按 ID 转写。
4. 默认中文输出,除非用户明确指定其他语言。`
|
||||
|
||||
// Builtin "general analysis assistant" template (English); runtime data
// parsed by parseAssistantMarkdown — do not reformat its contents.
const GENERAL_EN_MD = `---
id: general_en
name: General Analysis Assistant
supportedLocales:
  - en
presetQuestions:
  - What have people been discussing recently?
  - Who are the most active contacts?
  - Summarize my key chat topics this week
allowedBuiltinTools:
  - ai_query_time_window_activity
  - ai_query_session_candidates
  - ai_query_session_glimpse
  - ai_query_timeline
  - ai_fetch_message_briefs
  - ai_list_voice_messages
  - ai_transcribe_voice_messages
  - ai_query_topic_stats
  - ai_query_source_refs
  - ai_query_top_contacts
---

You are WeFlow's global chat analysis assistant.
Always ground your answers in tool evidence, stay concise, and clearly call out uncertainty when data is insufficient.`
|
||||
|
||||
// Builtin "general analysis assistant" template (Japanese); runtime data
// parsed by parseAssistantMarkdown — do not reformat or translate it.
const GENERAL_JA_MD = `---
id: general_ja
name: 汎用分析アシスタント
supportedLocales:
  - ja
presetQuestions:
  - 最近どんな話題が多い?
  - 一番アクティブな相手は誰?
  - 今週の重要な会話を要約して
allowedBuiltinTools:
  - ai_query_time_window_activity
  - ai_query_session_candidates
  - ai_query_session_glimpse
  - ai_query_timeline
  - ai_fetch_message_briefs
  - ai_list_voice_messages
  - ai_transcribe_voice_messages
  - ai_query_topic_stats
  - ai_query_source_refs
  - ai_query_top_contacts
---

あなたは WeFlow のグローバルチャット分析アシスタントです。
ツールから得た根拠に基づき、簡潔かつ正確に回答してください。`
|
||||
|
||||
// Builtin assistant templates seeded to disk on first initialization.
const BUILTIN_ASSISTANTS = [
  { id: 'general_cn', raw: GENERAL_CN_MD },
  { id: 'general_en', raw: GENERAL_EN_MD },
  { id: 'general_ja', raw: GENERAL_JA_MD }
] as const
|
||||
|
||||
function normalizeText(value: unknown, fallback = ''): string {
|
||||
const text = String(value ?? '').trim()
|
||||
return text || fallback
|
||||
}
|
||||
|
||||
function parseInlineList(text: string): string[] {
|
||||
const raw = normalizeText(text)
|
||||
if (!raw) return []
|
||||
return raw
|
||||
.split(',')
|
||||
.map((item) => item.trim())
|
||||
.filter(Boolean)
|
||||
}
|
||||
|
||||
function splitFrontmatter(raw: string): { frontmatter: string; body: string } {
|
||||
const normalized = String(raw || '')
|
||||
if (!normalized.startsWith('---')) {
|
||||
return { frontmatter: '', body: normalized.trim() }
|
||||
}
|
||||
const end = normalized.indexOf('\n---', 3)
|
||||
if (end < 0) return { frontmatter: '', body: normalized.trim() }
|
||||
return {
|
||||
frontmatter: normalized.slice(3, end).trim(),
|
||||
body: normalized.slice(end + 4).trim()
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Parses an assistant markdown file into a full config.
 * The frontmatter supports scalar `key: value` pairs, block lists
 * (`key:` followed by `- item` lines), and inline comma-separated lists;
 * the body after the closing `---` becomes the system prompt.
 */
function parseAssistantMarkdown(raw: string): AssistantConfigFull {
  const { frontmatter, body } = splitFrontmatter(raw)
  const lines = frontmatter ? frontmatter.split('\n') : []
  const data: Record<string, unknown> = {}
  // Tracks the key of the block list currently being collected; cleared
  // whenever a scalar key/value line is seen.
  let currentArrayKey = ''
  for (const line of lines) {
    const trimmed = line.trim()
    if (!trimmed) continue
    const kv = trimmed.match(/^([A-Za-z0-9_]+)\s*:\s*(.*)$/)
    if (kv) {
      const key = kv[1]
      const value = kv[2]
      if (!value) {
        // `key:` with no value opens a block list.
        data[key] = []
        currentArrayKey = key
      } else {
        data[key] = value
        currentArrayKey = ''
      }
      continue
    }
    // `- item` lines append to the currently open block list, if any.
    const arr = trimmed.match(/^- (.+)$/)
    if (arr && currentArrayKey) {
      const next = Array.isArray(data[currentArrayKey]) ? data[currentArrayKey] as string[] : []
      next.push(arr[1].trim())
      data[currentArrayKey] = next
    }
  }

  const id = normalizeText(data.id)
  const name = normalizeText(data.name, id || 'assistant')
  // Each list field accepts either the block-list form (already an array)
  // or an inline comma-separated string.
  const applicableChatTypes = Array.isArray(data.applicableChatTypes)
    ? (data.applicableChatTypes as string[]).filter((item): item is AssistantChatType => item === 'group' || item === 'private')
    : parseInlineList(String(data.applicableChatTypes || '')).filter((item): item is AssistantChatType => item === 'group' || item === 'private')
  const supportedLocales = Array.isArray(data.supportedLocales)
    ? (data.supportedLocales as string[]).map((item) => item.trim()).filter(Boolean)
    : parseInlineList(String(data.supportedLocales || ''))
  const presetQuestions = Array.isArray(data.presetQuestions)
    ? (data.presetQuestions as string[]).map((item) => item.trim()).filter(Boolean)
    : parseInlineList(String(data.presetQuestions || ''))
  const allowedBuiltinTools = Array.isArray(data.allowedBuiltinTools)
    ? (data.allowedBuiltinTools as string[]).map((item) => item.trim()).filter(Boolean)
    : parseInlineList(String(data.allowedBuiltinTools || ''))
  const builtinId = normalizeText(data.builtinId)

  return {
    id,
    name,
    systemPrompt: body,
    presetQuestions,
    allowedBuiltinTools,
    builtinId: builtinId || undefined,
    applicableChatTypes,
    supportedLocales
  }
}
|
||||
|
||||
function toMarkdown(config: AssistantConfigFull): string {
|
||||
const lines = [
|
||||
'---',
|
||||
`id: ${config.id}`,
|
||||
`name: ${config.name}`
|
||||
]
|
||||
if (config.builtinId) lines.push(`builtinId: ${config.builtinId}`)
|
||||
if (config.supportedLocales && config.supportedLocales.length > 0) {
|
||||
lines.push('supportedLocales:')
|
||||
config.supportedLocales.forEach((item) => lines.push(` - ${item}`))
|
||||
}
|
||||
if (config.applicableChatTypes && config.applicableChatTypes.length > 0) {
|
||||
lines.push('applicableChatTypes:')
|
||||
config.applicableChatTypes.forEach((item) => lines.push(` - ${item}`))
|
||||
}
|
||||
if (config.presetQuestions && config.presetQuestions.length > 0) {
|
||||
lines.push('presetQuestions:')
|
||||
config.presetQuestions.forEach((item) => lines.push(` - ${item}`))
|
||||
}
|
||||
if (config.allowedBuiltinTools && config.allowedBuiltinTools.length > 0) {
|
||||
lines.push('allowedBuiltinTools:')
|
||||
config.allowedBuiltinTools.forEach((item) => lines.push(` - ${item}`))
|
||||
}
|
||||
lines.push('---')
|
||||
lines.push('')
|
||||
lines.push(config.systemPrompt || '')
|
||||
return lines.join('\n')
|
||||
}
|
||||
|
||||
function defaultBuiltinToolCatalog(): Array<{ name: string; category: AssistantToolCategory }> {
|
||||
return [
|
||||
{ name: 'ai_query_time_window_activity', category: 'core' },
|
||||
{ name: 'ai_query_session_candidates', category: 'core' },
|
||||
{ name: 'ai_query_session_glimpse', category: 'core' },
|
||||
{ name: 'ai_query_timeline', category: 'core' },
|
||||
{ name: 'ai_fetch_message_briefs', category: 'core' },
|
||||
{ name: 'ai_list_voice_messages', category: 'core' },
|
||||
{ name: 'ai_transcribe_voice_messages', category: 'core' },
|
||||
{ name: 'ai_query_topic_stats', category: 'analysis' },
|
||||
{ name: 'ai_query_source_refs', category: 'analysis' },
|
||||
{ name: 'ai_query_top_contacts', category: 'analysis' },
|
||||
{ name: 'activate_skill', category: 'analysis' }
|
||||
]
|
||||
}
|
||||
|
||||
class AiAssistantService {
|
||||
private readonly config = ConfigService.getInstance()
|
||||
private initialized = false
|
||||
private readonly cache = new Map<string, AssistantConfigFull>()
|
||||
|
||||
private getRootDirCandidates(): string[] {
|
||||
const dbPath = normalizeText(this.config.get('dbPath'))
|
||||
const wxid = normalizeText(this.config.get('myWxid'))
|
||||
const roots: string[] = []
|
||||
if (dbPath && wxid) {
|
||||
roots.push(join(dbPath, wxid, 'db_storage', 'wf_ai_v2'))
|
||||
roots.push(join(dbPath, wxid, 'db_storage', 'wf_ai'))
|
||||
}
|
||||
roots.push(join(process.cwd(), 'data', 'wf_ai_v2'))
|
||||
return roots
|
||||
}
|
||||
|
||||
private async getRootDir(): Promise<string> {
|
||||
const roots = this.getRootDirCandidates()
|
||||
const dir = roots[0]
|
||||
await mkdir(dir, { recursive: true })
|
||||
return dir
|
||||
}
|
||||
|
||||
private async getAssistantsDir(): Promise<string> {
|
||||
const root = await this.getRootDir()
|
||||
const dir = join(root, 'assistants')
|
||||
await mkdir(dir, { recursive: true })
|
||||
return dir
|
||||
}
|
||||
|
||||
private async ensureInitialized(): Promise<void> {
|
||||
if (this.initialized) return
|
||||
const dir = await this.getAssistantsDir()
|
||||
|
||||
for (const builtin of BUILTIN_ASSISTANTS) {
|
||||
const filePath = join(dir, `${builtin.id}.md`)
|
||||
if (!existsSync(filePath)) {
|
||||
const parsed = parseAssistantMarkdown(builtin.raw)
|
||||
const config: AssistantConfigFull = {
|
||||
...parsed,
|
||||
builtinId: parsed.id
|
||||
}
|
||||
await writeFile(filePath, toMarkdown(config), 'utf8')
|
||||
}
|
||||
}
|
||||
|
||||
this.cache.clear()
|
||||
const files = await readdir(dir)
|
||||
for (const fileName of files) {
|
||||
if (!fileName.endsWith('.md')) continue
|
||||
const filePath = join(dir, fileName)
|
||||
try {
|
||||
const raw = await readFile(filePath, 'utf8')
|
||||
const parsed = parseAssistantMarkdown(raw)
|
||||
if (!parsed.id) continue
|
||||
this.cache.set(parsed.id, parsed)
|
||||
} catch {
|
||||
// ignore broken file
|
||||
}
|
||||
}
|
||||
this.initialized = true
|
||||
}
|
||||
|
||||
async getAll(): Promise<AssistantSummary[]> {
|
||||
await this.ensureInitialized()
|
||||
return Array.from(this.cache.values())
|
||||
.sort((a, b) => a.name.localeCompare(b.name, 'zh-Hans-CN'))
|
||||
.map((assistant) => ({ ...assistant }))
|
||||
}
|
||||
|
||||
async getConfig(id: string): Promise<AssistantConfigFull | null> {
|
||||
await this.ensureInitialized()
|
||||
const key = normalizeText(id)
|
||||
const config = this.cache.get(key)
|
||||
return config ? { ...config } : null
|
||||
}
|
||||
|
||||
/**
 * Create a new assistant from the given payload and persist it as a Markdown file.
 *
 * When `payload.id` is omitted or blank, a random `custom_` id (12 hex chars from a UUID)
 * is generated. Free-text fields are trimmed via normalizeText; list fields drop empty entries.
 *
 * @param payload assistant fields; `id` is optional
 * @returns `{ success: true, id }` on success, or `{ success: false, error }` when the id already exists
 */
async create(
  payload: Omit<AssistantConfigFull, 'id'> & { id?: string }
): Promise<{ success: boolean; id?: string; error?: string }> {
  await this.ensureInitialized()
  // Honor a caller-supplied id; otherwise fall back to a generated one.
  const id = normalizeText(payload.id, `custom_${randomUUID().replace(/-/g, '').slice(0, 12)}`)
  if (this.cache.has(id)) return { success: false, error: '助手 ID 已存在' }
  const config: AssistantConfigFull = {
    id,
    name: normalizeText(payload.name, '新助手'),
    systemPrompt: normalizeText(payload.systemPrompt),
    // List fields: keep only non-empty trimmed entries; non-array input becomes an empty list.
    presetQuestions: Array.isArray(payload.presetQuestions) ? payload.presetQuestions.map((item) => normalizeText(item)).filter(Boolean) : [],
    allowedBuiltinTools: Array.isArray(payload.allowedBuiltinTools) ? payload.allowedBuiltinTools.map((item) => normalizeText(item)).filter(Boolean) : [],
    // A blank builtinId is stored as undefined so it is omitted entirely.
    builtinId: normalizeText(payload.builtinId) || undefined,
    applicableChatTypes: Array.isArray(payload.applicableChatTypes) ? payload.applicableChatTypes : [],
    supportedLocales: Array.isArray(payload.supportedLocales) ? payload.supportedLocales.map((item) => normalizeText(item)).filter(Boolean) : []
  }
  const dir = await this.getAssistantsDir()
  // Persist to disk first, then mirror into the in-memory cache.
  await writeFile(join(dir, `${id}.md`), toMarkdown(config), 'utf8')
  this.cache.set(id, config)
  return { success: true, id }
}
|
||||
|
||||
/**
 * Apply a partial update to an existing assistant and persist the merged result.
 *
 * Merge rules: scalar text fields are normalized (name keeps the existing value when the
 * update is blank; systemPrompt is only replaced when non-nullish); list fields are only
 * replaced when the update actually supplies an array, otherwise the existing list is kept.
 * The id can never be changed by an update.
 *
 * @param id key of the assistant to update (normalized before lookup)
 * @param updates partial set of fields to merge over the existing config
 * @returns `{ success: true }` or `{ success: false, error }` when the assistant is unknown
 */
async update(
  id: string,
  updates: Partial<AssistantConfigFull>
): Promise<{ success: boolean; error?: string }> {
  await this.ensureInitialized()
  const key = normalizeText(id)
  const existing = this.cache.get(key)
  if (!existing) return { success: false, error: '助手不存在' }
  const next: AssistantConfigFull = {
    ...existing,
    ...updates,
    // Force the id back to the lookup key so updates cannot rename an assistant.
    id: key,
    name: normalizeText(updates.name, existing.name),
    // `== null` deliberately matches both null and undefined.
    systemPrompt: updates.systemPrompt == null ? existing.systemPrompt : normalizeText(updates.systemPrompt),
    presetQuestions: Array.isArray(updates.presetQuestions) ? updates.presetQuestions.map((item) => normalizeText(item)).filter(Boolean) : existing.presetQuestions,
    allowedBuiltinTools: Array.isArray(updates.allowedBuiltinTools) ? updates.allowedBuiltinTools.map((item) => normalizeText(item)).filter(Boolean) : existing.allowedBuiltinTools,
    applicableChatTypes: Array.isArray(updates.applicableChatTypes) ? updates.applicableChatTypes : existing.applicableChatTypes,
    supportedLocales: Array.isArray(updates.supportedLocales) ? updates.supportedLocales.map((item) => normalizeText(item)).filter(Boolean) : existing.supportedLocales
  }
  const dir = await this.getAssistantsDir()
  // Persist first, then refresh the cache entry.
  await writeFile(join(dir, `${key}.md`), toMarkdown(next), 'utf8')
  this.cache.set(key, next)
  return { success: true }
}
|
||||
|
||||
async delete(id: string): Promise<{ success: boolean; error?: string }> {
|
||||
await this.ensureInitialized()
|
||||
const key = normalizeText(id)
|
||||
if (key === 'general_cn' || key === 'general_en' || key === 'general_ja') {
|
||||
return { success: false, error: '默认助手不可删除' }
|
||||
}
|
||||
const dir = await this.getAssistantsDir()
|
||||
const filePath = join(dir, `${key}.md`)
|
||||
if (existsSync(filePath)) {
|
||||
await rm(filePath, { force: true })
|
||||
}
|
||||
this.cache.delete(key)
|
||||
return { success: true }
|
||||
}
|
||||
|
||||
/**
 * Reset an assistant back to its built-in template.
 *
 * Only assistants that carry a `builtinId` can be reset; the built-in raw Markdown is
 * re-parsed and written back under the assistant's current id (which is preserved).
 *
 * @param id key of the assistant to reset (normalized before lookup)
 * @returns `{ success: true }`, or an error when the assistant has no builtin template
 */
async reset(id: string): Promise<{ success: boolean; error?: string }> {
  await this.ensureInitialized()
  const key = normalizeText(id)
  const existing = this.cache.get(key)
  if (!existing?.builtinId) {
    return { success: false, error: '该助手不支持重置' }
  }
  const builtin = BUILTIN_ASSISTANTS.find((item) => item.id === existing.builtinId)
  if (!builtin) return { success: false, error: '内置模板不存在' }
  const parsed = parseAssistantMarkdown(builtin.raw)
  const config: AssistantConfigFull = {
    ...parsed,
    // Keep the current id and builtin link; only the content comes from the template.
    id: key,
    builtinId: existing.builtinId
  }
  const dir = await this.getAssistantsDir()
  await writeFile(join(dir, `${key}.md`), toMarkdown(config), 'utf8')
  this.cache.set(key, config)
  return { success: true }
}
|
||||
|
||||
async getBuiltinCatalog(): Promise<BuiltinAssistantInfo[]> {
|
||||
await this.ensureInitialized()
|
||||
return BUILTIN_ASSISTANTS.map((builtin) => {
|
||||
const parsed = parseAssistantMarkdown(builtin.raw)
|
||||
const imported = Array.from(this.cache.values()).some((config) => config.builtinId === builtin.id || config.id === builtin.id)
|
||||
return {
|
||||
id: parsed.id,
|
||||
name: parsed.name,
|
||||
systemPrompt: parsed.systemPrompt,
|
||||
applicableChatTypes: parsed.applicableChatTypes,
|
||||
supportedLocales: parsed.supportedLocales,
|
||||
imported
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
async getBuiltinToolCatalog(): Promise<Array<{ name: string; category: AssistantToolCategory }>> {
|
||||
return defaultBuiltinToolCatalog()
|
||||
}
|
||||
|
||||
async importFromMd(rawMd: string): Promise<{ success: boolean; id?: string; error?: string }> {
|
||||
try {
|
||||
const parsed = parseAssistantMarkdown(rawMd)
|
||||
if (!parsed.id) return { success: false, error: '缺少 id' }
|
||||
if (this.cache.has(parsed.id)) return { success: false, error: '助手 ID 已存在' }
|
||||
const dir = await this.getAssistantsDir()
|
||||
await writeFile(join(dir, `${parsed.id}.md`), toMarkdown(parsed), 'utf8')
|
||||
this.cache.set(parsed.id, parsed)
|
||||
return { success: true, id: parsed.id }
|
||||
} catch (error) {
|
||||
return { success: false, error: String((error as Error)?.message || error) }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const aiAssistantService = new AiAssistantService()
|
||||
395
electron/services/aiSkillService.ts
Normal file
395
electron/services/aiSkillService.ts
Normal file
@@ -0,0 +1,395 @@
|
||||
import { existsSync } from 'fs'
|
||||
import { mkdir, readdir, readFile, rm, writeFile } from 'fs/promises'
|
||||
import { join } from 'path'
|
||||
import { ConfigService } from './config'
|
||||
|
||||
/** Which chat kinds a skill applies to: every chat, group chats only, or private chats only. */
export type SkillChatScope = 'all' | 'group' | 'private'

/** Listing-level view of a skill (everything except its prompt body). */
export interface SkillSummary {
  /** Unique skill id (also the on-disk Markdown file name). */
  id: string
  /** Display name. */
  name: string
  /** Short human-readable description. */
  description: string
  /** Free-form tags from the frontmatter. */
  tags: string[]
  /** Chat kinds this skill is compatible with. */
  chatScope: SkillChatScope
  /** Tool names this skill expects to be available. */
  tools: string[]
  /** Id of the built-in template this skill originates from, when any. */
  builtinId?: string
}

/** Full skill definition: summary fields plus the prompt body. */
export interface SkillDef extends SkillSummary {
  /** Markdown body (after the frontmatter) used as the skill's execution manual. */
  prompt: string
}

/** Built-in catalog entry: summary plus whether the user has already imported it. */
export interface BuiltinSkillInfo extends SkillSummary {
  imported: boolean
}
|
||||
|
||||
// Built-in skill template: reconstruct what happened in a time window,
// emphasizing event order and evidence citations.
const SKILL_DEEP_TIMELINE_MD = `---
id: deep_timeline
name: 深度时间线追踪
description: 适合还原某段时间内发生了什么,强调事件顺序与证据引用。
tags:
- timeline
- evidence
chatScope: all
tools:
- ai_query_time_window_activity
- ai_query_session_candidates
- ai_query_session_glimpse
- ai_query_timeline
- ai_fetch_message_briefs
- ai_query_source_refs
---
你是“深度时间线追踪”技能。
执行步骤:
1. 先按时间窗扫描活跃会话,必要时补关键词筛选候选会话。
2. 对候选会话先抽样,再拉取时间轴。
3. 对关键节点用 ai_fetch_message_briefs 校对原文。
4. 最后输出“结论 + 关键节点 + 来源范围”。`

// Built-in skill template: contact-centric analysis ("who do I talk to most",
// relationship changes); restricted to private chats.
const SKILL_CONTACT_FOCUS_MD = `---
id: contact_focus
name: 联系人关系聚焦
description: 用于“我和谁聊得最多/关系变化”这类问题,强调联系人维度。
tags:
- contacts
- relation
chatScope: private
tools:
- ai_query_top_contacts
- ai_query_topic_stats
- ai_query_session_glimpse
- ai_query_timeline
- ai_query_source_refs
---
你是“联系人关系聚焦”技能。
执行步骤:
1. 优先调用 ai_query_top_contacts 得到候选联系人排名。
2. 针对 Top 联系人读取抽样消息并补充时间轴。
3. 如果用户问题涉及“变化趋势”,补 ai_query_topic_stats。
4. 输出时必须给出对比口径(时间窗、样本范围、消息数量)。`

// Built-in skill template: compliant voice analysis — list IDs first,
// transcribe only on explicit request, never treat untranscribed audio as fact.
const SKILL_VOICE_AUDIT_MD = `---
id: voice_audit
name: 语音证据审计
description: 对语音消息进行“先列ID再转写再总结”的合规分析。
tags:
- voice
- audit
chatScope: all
tools:
- ai_list_voice_messages
- ai_transcribe_voice_messages
- ai_query_source_refs
---
你是“语音证据审计”技能。
硬规则:
1. 必须先调用 ai_list_voice_messages 获取语音 ID 清单。
2. 仅能转写用户明确指定的 ID,单轮最多 5 条。
3. 未转写成功的语音不得作为事实。
4. 输出包含“已转写 / 失败 / 待确认”三段。`

// Registry of all built-in skills, seeded to disk by AiSkillService.ensureInitialized().
const BUILTIN_SKILLS = [
  { id: 'deep_timeline', raw: SKILL_DEEP_TIMELINE_MD },
  { id: 'contact_focus', raw: SKILL_CONTACT_FOCUS_MD },
  { id: 'voice_audit', raw: SKILL_VOICE_AUDIT_MD }
] as const
|
||||
|
||||
function normalizeText(value: unknown, fallback = ''): string {
|
||||
const text = String(value ?? '').trim()
|
||||
return text || fallback
|
||||
}
|
||||
|
||||
function parseInlineList(text: string): string[] {
|
||||
const raw = normalizeText(text)
|
||||
if (!raw) return []
|
||||
return raw
|
||||
.split(',')
|
||||
.map((item) => item.trim())
|
||||
.filter(Boolean)
|
||||
}
|
||||
|
||||
function splitFrontmatter(raw: string): { frontmatter: string; body: string } {
|
||||
const normalized = String(raw || '')
|
||||
if (!normalized.startsWith('---')) {
|
||||
return { frontmatter: '', body: normalized.trim() }
|
||||
}
|
||||
const end = normalized.indexOf('\n---', 3)
|
||||
if (end < 0) return { frontmatter: '', body: normalized.trim() }
|
||||
return {
|
||||
frontmatter: normalized.slice(3, end).trim(),
|
||||
body: normalized.slice(end + 4).trim()
|
||||
}
|
||||
}
|
||||
|
||||
function normalizeChatScope(value: unknown): SkillChatScope {
|
||||
const scope = normalizeText(value).toLowerCase()
|
||||
if (scope === 'group' || scope === 'private') return scope
|
||||
return 'all'
|
||||
}
|
||||
|
||||
/**
 * Parse a skill Markdown document (frontmatter + prompt body) into a SkillDef.
 *
 * The frontmatter parser is a minimal hand-rolled YAML subset: `key: value` scalar lines,
 * and `key:` followed by `- item` lines for arrays. Unknown lines are ignored.
 * Missing fields fall back to sensible defaults; `id` may come out empty — callers
 * are responsible for rejecting such documents.
 */
function parseSkillMarkdown(raw: string): SkillDef {
  const { frontmatter, body } = splitFrontmatter(raw)
  const lines = frontmatter ? frontmatter.split('\n') : []
  const data: Record<string, unknown> = {}
  // Tracks the key of the array currently being collected from `- item` lines.
  let currentArrayKey = ''
  for (const line of lines) {
    const trimmed = line.trim()
    if (!trimmed) continue
    const kv = trimmed.match(/^([A-Za-z0-9_]+)\s*:\s*(.*)$/)
    if (kv) {
      const key = kv[1]
      const value = kv[2]
      if (!value) {
        // `key:` with no value opens an array; subsequent `- item` lines append to it.
        data[key] = []
        currentArrayKey = key
      } else {
        data[key] = value
        currentArrayKey = ''
      }
      continue
    }
    const arr = trimmed.match(/^- (.+)$/)
    if (arr && currentArrayKey) {
      const next = Array.isArray(data[currentArrayKey]) ? data[currentArrayKey] as string[] : []
      next.push(arr[1].trim())
      data[currentArrayKey] = next
    }
  }

  const id = normalizeText(data.id)
  // Name falls back to the id, then to the literal 'skill'.
  const name = normalizeText(data.name, id || 'skill')
  const description = normalizeText(data.description)
  // tags/tools accept either YAML-style arrays or comma-separated inline scalars.
  const tags = Array.isArray(data.tags)
    ? (data.tags as string[]).map((item) => item.trim()).filter(Boolean)
    : parseInlineList(String(data.tags || ''))
  const tools = Array.isArray(data.tools)
    ? (data.tools as string[]).map((item) => item.trim()).filter(Boolean)
    : parseInlineList(String(data.tools || ''))
  const chatScope = normalizeChatScope(data.chatScope)
  const builtinId = normalizeText(data.builtinId)

  return {
    id,
    name,
    description,
    tags,
    chatScope,
    tools,
    // The entire post-frontmatter body is the skill's prompt.
    prompt: body,
    builtinId: builtinId || undefined
  }
}
|
||||
|
||||
function serializeSkillMarkdown(skill: SkillDef): string {
|
||||
const lines = [
|
||||
'---',
|
||||
`id: ${skill.id}`,
|
||||
`name: ${skill.name}`,
|
||||
`description: ${skill.description}`,
|
||||
`chatScope: ${skill.chatScope}`
|
||||
]
|
||||
if (skill.builtinId) lines.push(`builtinId: ${skill.builtinId}`)
|
||||
if (skill.tags.length > 0) {
|
||||
lines.push('tags:')
|
||||
skill.tags.forEach((tag) => lines.push(` - ${tag}`))
|
||||
}
|
||||
if (skill.tools.length > 0) {
|
||||
lines.push('tools:')
|
||||
skill.tools.forEach((tool) => lines.push(` - ${tool}`))
|
||||
}
|
||||
lines.push('---')
|
||||
lines.push('')
|
||||
lines.push(skill.prompt || '')
|
||||
return lines.join('\n')
|
||||
}
|
||||
|
||||
/**
 * Manages AI skill definitions stored as Markdown files on disk.
 *
 * On first use it seeds the built-in skills into the skills directory, then loads
 * every `*.md` file into an in-memory cache keyed by skill id. All mutating
 * operations write to disk first and then update the cache.
 */
class AiSkillService {
  private readonly config = ConfigService.getInstance()
  // Guards the one-time seed + load in ensureInitialized().
  private initialized = false
  // Skill id -> full definition; single source of truth after initialization.
  private readonly cache = new Map<string, SkillDef>()

  /**
   * Candidate storage roots, in priority order: the per-account wf_ai_v2 dir,
   * the legacy wf_ai dir, then a cwd-relative fallback.
   */
  private getRootDirCandidates(): string[] {
    const dbPath = normalizeText(this.config.get('dbPath'))
    const wxid = normalizeText(this.config.get('myWxid'))
    const roots: string[] = []
    if (dbPath && wxid) {
      roots.push(join(dbPath, wxid, 'db_storage', 'wf_ai_v2'))
      roots.push(join(dbPath, wxid, 'db_storage', 'wf_ai'))
    }
    roots.push(join(process.cwd(), 'data', 'wf_ai_v2'))
    return roots
  }

  // Uses only the first candidate root, creating it if necessary.
  // NOTE(review): the remaining candidates are currently never consulted — confirm intended.
  private async getRootDir(): Promise<string> {
    const roots = this.getRootDirCandidates()
    const dir = roots[0]
    await mkdir(dir, { recursive: true })
    return dir
  }

  /** Directory holding the per-skill Markdown files; created on demand. */
  private async getSkillsDir(): Promise<string> {
    const root = await this.getRootDir()
    const dir = join(root, 'skills')
    await mkdir(dir, { recursive: true })
    return dir
  }

  /**
   * One-time initialization: seed missing built-in skills to disk, backfill
   * builtinId into pre-existing builtin files that lack it, then load every
   * .md file into the cache. Broken files are skipped silently.
   */
  private async ensureInitialized(): Promise<void> {
    if (this.initialized) return
    const dir = await this.getSkillsDir()

    for (const builtin of BUILTIN_SKILLS) {
      const filePath = join(dir, `${builtin.id}.md`)
      if (!existsSync(filePath)) {
        // First run for this builtin: write it out with builtinId pointing at itself.
        const parsed = parseSkillMarkdown(builtin.raw)
        const config: SkillDef = {
          ...parsed,
          builtinId: parsed.id
        }
        await writeFile(filePath, serializeSkillMarkdown(config), 'utf8')
        continue
      }
      try {
        // Migration: older files may lack builtinId; add it and rewrite.
        const raw = await readFile(filePath, 'utf8')
        const parsed = parseSkillMarkdown(raw)
        if (!parsed.builtinId) {
          parsed.builtinId = builtin.id
          await writeFile(filePath, serializeSkillMarkdown(parsed), 'utf8')
        }
      } catch {
        // ignore broken file
      }
    }

    this.cache.clear()
    const files = await readdir(dir)
    for (const fileName of files) {
      if (!fileName.endsWith('.md')) continue
      const filePath = join(dir, fileName)
      try {
        const raw = await readFile(filePath, 'utf8')
        const parsed = parseSkillMarkdown(raw)
        // Files without an id are unusable; skip them.
        if (!parsed.id) continue
        this.cache.set(parsed.id, parsed)
      } catch {
        // ignore broken file
      }
    }
    this.initialized = true
  }

  /** All skills as summaries (defensive copies), sorted by name with zh-Hans-CN collation. */
  async getAll(): Promise<SkillSummary[]> {
    await this.ensureInitialized()
    return Array.from(this.cache.values())
      .sort((a, b) => a.name.localeCompare(b.name, 'zh-Hans-CN'))
      .map((skill) => ({
        id: skill.id,
        name: skill.name,
        description: skill.description,
        tags: [...skill.tags],
        chatScope: skill.chatScope,
        tools: [...skill.tools],
        builtinId: skill.builtinId
      }))
  }

  /** Full definition of one skill by id, with copied arrays, or null when unknown. */
  async getConfig(id: string): Promise<SkillDef | null> {
    await this.ensureInitialized()
    const key = normalizeText(id)
    const value = this.cache.get(key)
    return value ? {
      ...value,
      tags: [...value.tags],
      tools: [...value.tools]
    } : null
  }

  /**
   * Create a skill from raw Markdown. Rejects documents without an id and
   * duplicate ids; on success writes the normalized file and caches it.
   */
  async create(rawMd: string): Promise<{ success: boolean; id?: string; error?: string }> {
    await this.ensureInitialized()
    try {
      const parsed = parseSkillMarkdown(rawMd)
      if (!parsed.id) return { success: false, error: '缺少 id' }
      if (this.cache.has(parsed.id)) return { success: false, error: '技能 ID 已存在' }
      const dir = await this.getSkillsDir()
      await writeFile(join(dir, `${parsed.id}.md`), serializeSkillMarkdown(parsed), 'utf8')
      this.cache.set(parsed.id, parsed)
      return { success: true, id: parsed.id }
    } catch (error) {
      return { success: false, error: String((error as Error)?.message || error) }
    }
  }

  /**
   * Replace an existing skill's content with re-parsed Markdown. The id is
   * forced back to the lookup key, and the builtin link is preserved when the
   * new document omits it.
   */
  async update(id: string, rawMd: string): Promise<{ success: boolean; error?: string }> {
    await this.ensureInitialized()
    const key = normalizeText(id)
    const existing = this.cache.get(key)
    if (!existing) return { success: false, error: '技能不存在' }
    try {
      const parsed = parseSkillMarkdown(rawMd)
      parsed.id = key
      if (existing.builtinId && !parsed.builtinId) parsed.builtinId = existing.builtinId
      const dir = await this.getSkillsDir()
      await writeFile(join(dir, `${key}.md`), serializeSkillMarkdown(parsed), 'utf8')
      this.cache.set(key, parsed)
      return { success: true }
    } catch (error) {
      return { success: false, error: String((error as Error)?.message || error) }
    }
  }

  /**
   * Delete a skill file and evict it from the cache.
   * NOTE(review): built-in skills are deletable here (unlike default assistants
   * in AiAssistantService) and will be re-seeded on the next initialization — confirm intended.
   */
  async delete(id: string): Promise<{ success: boolean; error?: string }> {
    await this.ensureInitialized()
    const key = normalizeText(id)
    const dir = await this.getSkillsDir()
    const filePath = join(dir, `${key}.md`)
    if (existsSync(filePath)) {
      await rm(filePath, { force: true })
    }
    this.cache.delete(key)
    return { success: true }
  }

  /** Built-in skill templates, each flagged with whether it is already imported. */
  async getBuiltinCatalog(): Promise<BuiltinSkillInfo[]> {
    await this.ensureInitialized()
    return BUILTIN_SKILLS.map((builtin) => {
      const parsed = parseSkillMarkdown(builtin.raw)
      // Imported when any cached skill links to this builtin or reuses its id.
      const imported = Array.from(this.cache.values()).some((skill) => skill.builtinId === parsed.id || skill.id === parsed.id)
      return {
        id: parsed.id,
        name: parsed.name,
        description: parsed.description,
        tags: parsed.tags,
        chatScope: parsed.chatScope,
        tools: parsed.tools,
        imported
      }
    })
  }

  /** Import is identical to create: parse, validate, persist. */
  async importFromMd(rawMd: string): Promise<{ success: boolean; id?: string; error?: string }> {
    return this.create(rawMd)
  }

  /**
   * Build the "auto skill" menu text injected into the system prompt: the list of
   * skills compatible with the chat scope and (when given) fully covered by the
   * allowed tool set. Returns null when no skill qualifies; at most 15 are listed.
   */
  async getAutoSkillMenu(
    chatScope: SkillChatScope,
    allowedTools?: string[]
  ): Promise<string | null> {
    await this.ensureInitialized()
    const compatible = Array.from(this.cache.values()).filter((skill) => {
      if (skill.chatScope !== 'all' && skill.chatScope !== chatScope) return false
      // No tool whitelist means everything is allowed.
      if (!allowedTools || allowedTools.length === 0) return true
      // Every tool the skill needs must be whitelisted.
      return skill.tools.every((tool) => allowedTools.includes(tool))
    })
    if (compatible.length === 0) return null
    const lines = compatible.slice(0, 15).map((skill) => `- ${skill.id}: ${skill.name} - ${skill.description}`)
    return [
      '你可以按需调用工具 activate_skill 以激活对应技能。',
      '当用户问题明显匹配某个技能时,先调用 activate_skill 获取执行手册。',
      '若问题简单或不匹配技能,可直接回答。',
      '',
      ...lines
    ].join('\n')
  }
}
|
||||
|
||||
export const aiSkillService = new AiSkillService()
|
||||
@@ -74,6 +74,16 @@ interface ConfigSchema {
|
||||
aiModelApiBaseUrl: string
|
||||
aiModelApiKey: string
|
||||
aiModelApiModel: string
|
||||
aiAgentMaxMessagesPerRequest: number
|
||||
aiAgentMaxHistoryRounds: number
|
||||
aiAgentEnableAutoSkill: boolean
|
||||
aiAgentSearchContextBefore: number
|
||||
aiAgentSearchContextAfter: number
|
||||
aiAgentPreprocessClean: boolean
|
||||
aiAgentPreprocessMerge: boolean
|
||||
aiAgentPreprocessDenoise: boolean
|
||||
aiAgentPreprocessDesensitize: boolean
|
||||
aiAgentPreprocessAnonymize: boolean
|
||||
aiInsightEnabled: boolean
|
||||
aiInsightApiBaseUrl: string
|
||||
aiInsightApiKey: string
|
||||
@@ -184,6 +194,16 @@ export class ConfigService {
|
||||
aiModelApiBaseUrl: '',
|
||||
aiModelApiKey: '',
|
||||
aiModelApiModel: 'gpt-4o-mini',
|
||||
aiAgentMaxMessagesPerRequest: 120,
|
||||
aiAgentMaxHistoryRounds: 12,
|
||||
aiAgentEnableAutoSkill: true,
|
||||
aiAgentSearchContextBefore: 3,
|
||||
aiAgentSearchContextAfter: 3,
|
||||
aiAgentPreprocessClean: true,
|
||||
aiAgentPreprocessMerge: true,
|
||||
aiAgentPreprocessDenoise: true,
|
||||
aiAgentPreprocessDesensitize: false,
|
||||
aiAgentPreprocessAnonymize: false,
|
||||
aiInsightEnabled: false,
|
||||
aiInsightApiBaseUrl: '',
|
||||
aiInsightApiKey: '',
|
||||
|
||||
@@ -85,6 +85,10 @@ export class WcdbCore {
|
||||
private wcdbScanMediaStream: any = null
|
||||
private wcdbGetHeadImageBuffers: any = null
|
||||
private wcdbSearchMessages: any = null
|
||||
private wcdbAiQuerySessionCandidates: any = null
|
||||
private wcdbAiQueryTimeline: any = null
|
||||
private wcdbAiQueryTopicStats: any = null
|
||||
private wcdbAiQuerySourceRefs: any = null
|
||||
private wcdbGetSnsTimeline: any = null
|
||||
private wcdbGetSnsAnnualStats: any = null
|
||||
private wcdbGetSnsUsernames: any = null
|
||||
@@ -1060,6 +1064,26 @@ export class WcdbCore {
|
||||
} catch {
|
||||
this.wcdbSearchMessages = null
|
||||
}
|
||||
try {
|
||||
this.wcdbAiQuerySessionCandidates = this.lib.func('int32 wcdb_ai_query_session_candidates(int64 handle, const char* optionsJson, _Out_ void** outJson)')
|
||||
} catch {
|
||||
this.wcdbAiQuerySessionCandidates = null
|
||||
}
|
||||
try {
|
||||
this.wcdbAiQueryTimeline = this.lib.func('int32 wcdb_ai_query_timeline(int64 handle, const char* optionsJson, _Out_ void** outJson)')
|
||||
} catch {
|
||||
this.wcdbAiQueryTimeline = null
|
||||
}
|
||||
try {
|
||||
this.wcdbAiQueryTopicStats = this.lib.func('int32 wcdb_ai_query_topic_stats(int64 handle, const char* optionsJson, _Out_ void** outJson)')
|
||||
} catch {
|
||||
this.wcdbAiQueryTopicStats = null
|
||||
}
|
||||
try {
|
||||
this.wcdbAiQuerySourceRefs = this.lib.func('int32 wcdb_ai_query_source_refs(int64 handle, const char* optionsJson, _Out_ void** outJson)')
|
||||
} catch {
|
||||
this.wcdbAiQuerySourceRefs = null
|
||||
}
|
||||
|
||||
// wcdb_status wcdb_get_sns_timeline(wcdb_handle handle, int32_t limit, int32_t offset, const char* username, const char* keyword, int32_t start_time, int32_t end_time, char** out_json)
|
||||
try {
|
||||
@@ -3370,6 +3394,204 @@ export class WcdbCore {
|
||||
}
|
||||
}
|
||||
|
||||
/** Quote a table/column name for SQLite, doubling any embedded double quotes. */
private normalizeSqlIdentifier(name: string): string {
  const escaped = String(name || '').replace(/"/g, '""')
  return `"${escaped}"`
}
|
||||
|
||||
/** Replace SQL block (slash-star) and line (--) comments with spaces, then trim. */
private stripSqlComments(sql: string): string {
  let text = String(sql || '')
  text = text.replace(/\/\*[\s\S]*?\*\//g, ' ') // block comments (non-greedy)
  text = text.replace(/--[^\n\r]*/g, ' ') // line comments to end of line
  return text.trim()
}
|
||||
|
||||
/**
 * Gatekeeper for SQL Lab: accept only single-statement, read-only SQL.
 *
 * Accepted statement heads: SELECT / WITH / PRAGMA / EXPLAIN. Rejected: empty input,
 * embedded NULs, multiple statements, any write/DDL/transaction keyword, and a few
 * dangerous PRAGMA assignments. The keyword check is deliberately coarse (no leading
 * word boundary), so it can also reject identifiers that merely end in a keyword —
 * a fail-safe false positive, never a false negative for whole keywords.
 */
private isSqlLabReadOnly(sql: string): boolean {
  const normalized = this.stripSqlComments(sql).trim()
  if (!normalized) return false
  // Reject embedded NUL bytes outright.
  if (normalized.includes('\u0000')) return false
  // A ';' followed by any non-whitespace means a second statement is present.
  const hasMultipleStatements = /;[\s\r\n]*\S/.test(normalized)
  if (hasMultipleStatements) return false
  const lower = normalized.toLowerCase()
  // Any write / DDL / transaction keyword anywhere disqualifies the statement.
  if (/(insert|update|delete|drop|alter|create|attach|detach|replace|truncate|reindex|vacuum|analyze|begin|commit|rollback|savepoint|release)\b/.test(lower)) {
    return false
  }
  // Block PRAGMA assignments that change database behavior.
  if (/pragma\s+.*(writable_schema|journal_mode|locking_mode|foreign_keys)\s*=/.test(lower)) {
    return false
  }
  // Finally, the statement must *start* with a read-only verb.
  return /^(select|with|pragma|explain)\b/.test(lower)
}
|
||||
|
||||
/**
 * Enumerate user tables (and their column names) for one data source.
 *
 * Reads sqlite_master for table names (excluding internal sqlite_* tables), then
 * PRAGMA table_info per table for columns. Failures degrade to an empty table list
 * or empty column list rather than throwing.
 *
 * @param kind which logical database to query
 * @param path message-db file path, or null for contact/biz sources
 * @param maxTables cap on tables returned (default 60)
 * @param maxColumns cap on columns per table (default 120)
 */
private async sqlLabListTablesForSource(
  kind: 'message' | 'contact' | 'biz',
  path: string | null,
  maxTables: number = 60,
  maxColumns: number = 120
): Promise<Array<{ name: string; columns: string[] }>> {
  const tableRows = await this.execQuery(
    kind,
    path,
    `SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%' ORDER BY name LIMIT ${Math.max(1, maxTables)}`
  )
  if (!tableRows.success || !Array.isArray(tableRows.rows)) return []

  const tables: Array<{ name: string; columns: string[] }> = []
  for (const row of tableRows.rows) {
    const tableName = String((row as any)?.name || '').trim()
    if (!tableName) continue
    // Identifier is quoted/escaped before interpolation into the PRAGMA.
    const pragma = await this.execQuery(kind, path, `PRAGMA table_info(${this.normalizeSqlIdentifier(tableName)})`)
    const columns = pragma.success && Array.isArray(pragma.rows)
      ? pragma.rows
        .map((item: any) => String(item?.name || '').trim())
        .filter(Boolean)
        .slice(0, maxColumns)
      : []
    tables.push({ name: tableName, columns })
  }

  return tables
}
|
||||
|
||||
/**
 * Build a schema summary across data sources for SQL Lab.
 *
 * With a sessionId, only the message DBs that hold that session's tables are included;
 * otherwise all known message DBs are listed. Message sources are capped at 8 in both
 * paths. The contact and biz sources are always appended. Besides the structured
 * `schema`, a plain-text `schemaText` rendering is returned for prompt injection.
 */
async sqlLabGetSchema(payload?: { sessionId?: string }): Promise<{
  success: boolean
  schema?: {
    generatedAt: number
    sources: Array<{
      kind: 'message' | 'contact' | 'biz'
      path: string | null
      label: string
      tables: Array<{ name: string; columns: string[] }>
    }>
  }
  schemaText?: string
  error?: string
}> {
  if (!this.ensureReady()) {
    return { success: false, error: 'WCDB 未连接' }
  }

  try {
    const sessionId = String(payload?.sessionId || '').trim()
    const sources: Array<{
      kind: 'message' | 'contact' | 'biz'
      path: string | null
      label: string
      tables: Array<{ name: string; columns: string[] }>
    }> = []

    if (sessionId) {
      // Session-scoped: collect the distinct message-db paths backing this session.
      const tableStats = await this.getMessageTableStats(sessionId)
      const tableEntries = tableStats.success && Array.isArray(tableStats.tables) ? tableStats.tables : []
      const dbPathSet = new Set<string>()
      for (const entry of tableEntries) {
        const dbPath = String((entry as any)?.db_path || '').trim()
        if (!dbPath) continue
        dbPathSet.add(dbPath)
      }
      for (const dbPath of Array.from(dbPathSet).slice(0, 8)) {
        sources.push({
          kind: 'message',
          path: dbPath,
          // Label is the file name component of the path.
          label: dbPath.split(/[\\/]/).pop() || dbPath,
          tables: await this.sqlLabListTablesForSource('message', dbPath)
        })
      }
    } else {
      // Global: enumerate all message DBs (also capped at 8).
      const messageDbs = await this.listMessageDbs()
      const paths = messageDbs.success && Array.isArray(messageDbs.data) ? messageDbs.data : []
      for (const dbPath of paths.slice(0, 8)) {
        sources.push({
          kind: 'message',
          path: dbPath,
          label: dbPath.split(/[\\/]/).pop() || dbPath,
          tables: await this.sqlLabListTablesForSource('message', dbPath)
        })
      }
    }

    // Contact and biz databases are always included.
    sources.push({
      kind: 'contact',
      path: null,
      label: 'contact',
      tables: await this.sqlLabListTablesForSource('contact', null)
    })
    sources.push({
      kind: 'biz',
      path: null,
      label: 'biz',
      tables: await this.sqlLabListTablesForSource('biz', null)
    })

    // Human-readable rendering: one "[kind] label" header per source,
    // followed by "- table (col, col, ...)" lines.
    const schemaText = sources
      .map((source) => {
        const tableLines = source.tables
          .map((table) => `- ${table.name} (${table.columns.join(', ')})`)
          .join('\n')
        return `[${source.kind}] ${source.label}\n${tableLines}`
      })
      .join('\n\n')

    return {
      success: true,
      schema: {
        generatedAt: Date.now(),
        sources
      },
      schemaText
    }
  } catch (e) {
    return { success: false, error: String(e) }
  }
}
|
||||
|
||||
/**
 * Execute a read-only SQL statement for SQL Lab.
 *
 * The statement must pass isSqlLabReadOnly (single-statement SELECT/WITH/PRAGMA/EXPLAIN).
 * SELECT/WITH statements are wrapped in `SELECT * FROM (...) LIMIT n` to enforce a row
 * cap (1..1000, default 200); PRAGMA/EXPLAIN run as-is. Column names are derived from
 * the keys of the first result row, so an empty result yields an empty column list.
 */
async sqlLabExecuteReadonly(payload: {
  kind: 'message' | 'contact' | 'biz'
  path?: string | null
  sql: string
  limit?: number
}): Promise<{
  success: boolean
  rows?: any[]
  columns?: string[]
  total?: number
  error?: string
}> {
  if (!this.ensureReady()) {
    return { success: false, error: 'WCDB 未连接' }
  }

  try {
    const sql = String(payload?.sql || '').trim()
    if (!this.isSqlLabReadOnly(sql)) {
      return { success: false, error: '仅允许只读 SQL(SELECT/WITH/PRAGMA/EXPLAIN)' }
    }

    // Unknown kinds default to 'message'.
    const kind = payload?.kind === 'contact' || payload?.kind === 'biz' ? payload.kind : 'message'
    // Only message queries target a specific db file; contact/biz always use null.
    const path = kind === 'message'
      ? (payload?.path == null ? null : String(payload.path))
      : null
    // Clamp the row cap to [1, 1000]; NaN/0 fall back to 200.
    const limit = Math.max(1, Math.min(1000, Number(payload?.limit || 200)))
    // Strip trailing semicolons so the statement can be used as a subquery.
    const sqlNoTail = sql.replace(/;+\s*$/, '')
    const lower = sqlNoTail.toLowerCase()
    const executable = /^(select|with)\b/.test(lower)
      ? `SELECT * FROM (${sqlNoTail}) LIMIT ${limit}`
      : sqlNoTail

    const result = await this.execQuery(kind, path, executable)
    if (!result.success) {
      return { success: false, error: result.error || '执行 SQL 失败' }
    }
    const rows = Array.isArray(result.rows) ? result.rows : []
    return {
      success: true,
      rows,
      // Columns come from the first row's keys; empty result => no columns.
      columns: rows[0] && typeof rows[0] === 'object' ? Object.keys(rows[0] as Record<string, unknown>) : [],
      total: rows.length
    }
  } catch (e) {
    return { success: false, error: String(e) }
  }
}
|
||||
|
||||
async execQuery(kind: string, path: string | null, sql: string, params: any[] = []): Promise<{ success: boolean; rows?: any[]; error?: string }> {
|
||||
if (!this.ensureReady()) {
|
||||
return { success: false, error: 'WCDB 未连接' }
|
||||
@@ -3979,6 +4201,110 @@ export class WcdbCore {
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Query candidate sessions for the AI agent via the native wcdb binding.
 *
 * Options are serialized to the snake_case JSON the native side expects; a zero
 * timestamp means "unbounded". Fails gracefully when the binding was not resolved
 * (older data-service versions).
 *
 * @returns `{ success, rows }` where rows is the parsed JSON array (empty if not an array)
 */
async aiQuerySessionCandidates(options: {
  keyword: string
  limit?: number
  beginTimestamp?: number
  endTimestamp?: number
}): Promise<{ success: boolean; rows?: any[]; error?: string }> {
  if (!this.ensureReady()) return { success: false, error: 'WCDB 未连接' }
  if (!this.wcdbAiQuerySessionCandidates) return { success: false, error: '当前数据服务版本不支持 AI 候选会话查询' }
  try {
    // Single-element array acts as the _Out_ pointer slot for the FFI call.
    const outPtr = [null as any]
    const result = this.wcdbAiQuerySessionCandidates(this.handle, JSON.stringify({
      keyword: options.keyword || '',
      limit: options.limit || 12,
      begin_timestamp: options.beginTimestamp || 0,
      end_timestamp: options.endTimestamp || 0
    }), outPtr)
    // Non-zero status or a null out-pointer both indicate failure.
    if (result !== 0 || !outPtr[0]) return { success: false, error: `AI 候选会话查询失败: ${result}` }
    const jsonStr = this.decodeJsonPtr(outPtr[0])
    if (!jsonStr) return { success: false, error: '解析 AI 候选会话结果失败' }
    const rows = JSON.parse(jsonStr)
    return { success: true, rows: Array.isArray(rows) ? rows : [] }
  } catch (e) {
    return { success: false, error: String(e) }
  }
}
|
||||
|
||||
/**
 * Query a message timeline for the AI agent via the native wcdb binding.
 *
 * An empty sessionId/keyword or zero timestamp means "no filter" on that dimension.
 * The native JSON result is decoded through parseMessageJson into message rows.
 */
async aiQueryTimeline(options: {
  sessionId?: string
  keyword: string
  limit?: number
  offset?: number
  beginTimestamp?: number
  endTimestamp?: number
}): Promise<{ success: boolean; rows?: any[]; error?: string }> {
  if (!this.ensureReady()) return { success: false, error: 'WCDB 未连接' }
  if (!this.wcdbAiQueryTimeline) return { success: false, error: '当前数据服务版本不支持 AI 时间轴查询' }
  try {
    // Single-element array acts as the _Out_ pointer slot for the FFI call.
    const outPtr = [null as any]
    const result = this.wcdbAiQueryTimeline(this.handle, JSON.stringify({
      session_id: options.sessionId || '',
      keyword: options.keyword || '',
      limit: options.limit || 120,
      offset: options.offset || 0,
      begin_timestamp: options.beginTimestamp || 0,
      end_timestamp: options.endTimestamp || 0
    }), outPtr)
    if (result !== 0 || !outPtr[0]) return { success: false, error: `AI 时间轴查询失败: ${result}` }
    const jsonStr = this.decodeJsonPtr(outPtr[0])
    if (!jsonStr) return { success: false, error: '解析 AI 时间轴结果失败' }
    // Message rows need the dedicated parser (not plain JSON.parse).
    const rows = this.parseMessageJson(jsonStr)
    return { success: true, rows }
  } catch (e) {
    return { success: false, error: String(e) }
  }
}
|
||||
|
||||
async aiQueryTopicStats(options: {
|
||||
sessionIds: string[]
|
||||
beginTimestamp?: number
|
||||
endTimestamp?: number
|
||||
}): Promise<{ success: boolean; data?: any; error?: string }> {
|
||||
if (!this.ensureReady()) return { success: false, error: 'WCDB 未连接' }
|
||||
if (!this.wcdbAiQueryTopicStats) return { success: false, error: '当前数据服务版本不支持 AI 主题统计' }
|
||||
try {
|
||||
const outPtr = [null as any]
|
||||
const result = this.wcdbAiQueryTopicStats(this.handle, JSON.stringify({
|
||||
session_ids_json: JSON.stringify(options.sessionIds || []),
|
||||
begin_timestamp: options.beginTimestamp || 0,
|
||||
end_timestamp: options.endTimestamp || 0
|
||||
}), outPtr)
|
||||
if (result !== 0 || !outPtr[0]) return { success: false, error: `AI 主题统计失败: ${result}` }
|
||||
const jsonStr = this.decodeJsonPtr(outPtr[0])
|
||||
if (!jsonStr) return { success: false, error: '解析 AI 主题统计失败' }
|
||||
const data = JSON.parse(jsonStr)
|
||||
return { success: true, data }
|
||||
} catch (e) {
|
||||
return { success: false, error: String(e) }
|
||||
}
|
||||
}
|
||||
|
||||
async aiQuerySourceRefs(options: {
|
||||
sessionIds: string[]
|
||||
beginTimestamp?: number
|
||||
endTimestamp?: number
|
||||
}): Promise<{ success: boolean; data?: any; error?: string }> {
|
||||
if (!this.ensureReady()) return { success: false, error: 'WCDB 未连接' }
|
||||
if (!this.wcdbAiQuerySourceRefs) return { success: false, error: '当前数据服务版本不支持 AI 来源引用查询' }
|
||||
try {
|
||||
const outPtr = [null as any]
|
||||
const result = this.wcdbAiQuerySourceRefs(this.handle, JSON.stringify({
|
||||
session_ids_json: JSON.stringify(options.sessionIds || []),
|
||||
begin_timestamp: options.beginTimestamp || 0,
|
||||
end_timestamp: options.endTimestamp || 0
|
||||
}), outPtr)
|
||||
if (result !== 0 || !outPtr[0]) return { success: false, error: `AI 来源引用查询失败: ${result}` }
|
||||
const jsonStr = this.decodeJsonPtr(outPtr[0])
|
||||
if (!jsonStr) return { success: false, error: '解析 AI 来源引用查询失败' }
|
||||
const data = JSON.parse(jsonStr)
|
||||
return { success: true, data }
|
||||
} catch (e) {
|
||||
return { success: false, error: String(e) }
|
||||
}
|
||||
}
|
||||
|
||||
async getSnsTimeline(limit: number, offset: number, usernames?: string[], keyword?: string, startTime?: number, endTime?: number): Promise<{ success: boolean; timeline?: any[]; error?: string }> {
|
||||
if (!this.ensureReady()) return { success: false, error: 'WCDB 未连接' }
|
||||
if (!this.wcdbGetSnsTimeline) return { success: false, error: '当前数据服务版本不支持获取朋友圈' }
|
||||
|
||||
@@ -489,6 +489,44 @@ export class WcdbService {
|
||||
return this.callWorker('closeMessageCursor', { cursor })
|
||||
}
|
||||
|
||||
/**
|
||||
* SQL Lab: 获取多数据源 Schema 摘要
|
||||
*/
|
||||
async sqlLabGetSchema(payload?: { sessionId?: string }): Promise<{
|
||||
success: boolean
|
||||
schema?: {
|
||||
generatedAt: number
|
||||
sources: Array<{
|
||||
kind: 'message' | 'contact' | 'biz'
|
||||
path: string | null
|
||||
label: string
|
||||
tables: Array<{ name: string; columns: string[] }>
|
||||
}>
|
||||
}
|
||||
schemaText?: string
|
||||
error?: string
|
||||
}> {
|
||||
return this.callWorker('sqlLabGetSchema', payload || {})
|
||||
}
|
||||
|
||||
/**
|
||||
* SQL Lab: 执行只读 SQL
|
||||
*/
|
||||
async sqlLabExecuteReadonly(payload: {
|
||||
kind: 'message' | 'contact' | 'biz'
|
||||
path?: string | null
|
||||
sql: string
|
||||
limit?: number
|
||||
}): Promise<{
|
||||
success: boolean
|
||||
rows?: any[]
|
||||
columns?: string[]
|
||||
total?: number
|
||||
error?: string
|
||||
}> {
|
||||
return this.callWorker('sqlLabExecuteReadonly', payload)
|
||||
}
|
||||
|
||||
/**
|
||||
* 执行 SQL 查询(仅主进程内部使用:fallback/diagnostic/低频兼容)
|
||||
*/
|
||||
@@ -542,6 +580,42 @@ export class WcdbService {
|
||||
return this.callWorker('searchMessages', { keyword, sessionId, limit, offset, beginTimestamp, endTimestamp })
|
||||
}
|
||||
|
||||
async aiQuerySessionCandidates(options: {
|
||||
keyword: string
|
||||
limit?: number
|
||||
beginTimestamp?: number
|
||||
endTimestamp?: number
|
||||
}): Promise<{ success: boolean; rows?: any[]; error?: string }> {
|
||||
return this.callWorker('aiQuerySessionCandidates', { options })
|
||||
}
|
||||
|
||||
async aiQueryTimeline(options: {
|
||||
sessionId?: string
|
||||
keyword: string
|
||||
limit?: number
|
||||
offset?: number
|
||||
beginTimestamp?: number
|
||||
endTimestamp?: number
|
||||
}): Promise<{ success: boolean; rows?: any[]; error?: string }> {
|
||||
return this.callWorker('aiQueryTimeline', { options })
|
||||
}
|
||||
|
||||
async aiQueryTopicStats(options: {
|
||||
sessionIds: string[]
|
||||
beginTimestamp?: number
|
||||
endTimestamp?: number
|
||||
}): Promise<{ success: boolean; data?: any; error?: string }> {
|
||||
return this.callWorker('aiQueryTopicStats', { options })
|
||||
}
|
||||
|
||||
async aiQuerySourceRefs(options: {
|
||||
sessionIds: string[]
|
||||
beginTimestamp?: number
|
||||
endTimestamp?: number
|
||||
}): Promise<{ success: boolean; data?: any; error?: string }> {
|
||||
return this.callWorker('aiQuerySourceRefs', { options })
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取语音数据
|
||||
*/
|
||||
|
||||
Reference in New Issue
Block a user