diff --git a/src/controllers/feedback.ts b/src/controllers/feedback.ts index 8061d3a..a36f254 100644 --- a/src/controllers/feedback.ts +++ b/src/controllers/feedback.ts @@ -1,6 +1,5 @@ import { zValidator } from '@hono/zod-validator'; import { Hono } from 'hono'; -import OpenAI from 'openai'; import { z } from 'zod'; import config from '../config'; import { LearningSystem } from '../review/learning/learning-system'; @@ -16,13 +15,13 @@ let learningSystem: LearningSystem | null = null; let reviewStore: FileReviewStore | null = null; // 初始化反馈系统(记忆系统可选) -export function initializeFeedbackSystem(openaiClient: OpenAI, store: FileReviewStore): void { +export function initializeFeedbackSystem(store: FileReviewStore): void { // 保存store实例以供handlers重用,避免多实例状态不同步 reviewStore = store; // 记忆系统为可选功能 if (config.review.qdrantUrl && config.review.enableMemory) { - memoryStore = new VectorMemoryStore(config.review.qdrantUrl, openaiClient); + memoryStore = new VectorMemoryStore(config.review.qdrantUrl); learningSystem = new LearningSystem(memoryStore, reviewStore); memoryStore.initialize().catch((err) => { diff --git a/src/review/agents/correctness-agent.ts b/src/review/agents/correctness-agent.ts index ab4dc75..8d26c52 100644 --- a/src/review/agents/correctness-agent.ts +++ b/src/review/agents/correctness-agent.ts @@ -1,18 +1,12 @@ -import OpenAI from 'openai'; +import type { LLMGateway } from '../../llm/gateway'; import type { LearningSystem } from '../learning/learning-system'; import { ToolRegistry } from '../tools/registry'; import { SpecialistAgent } from './specialist-agent'; export class CorrectnessAgent extends SpecialistAgent { - constructor( - openai: OpenAI, - model: string, - toolRegistry?: ToolRegistry, - learningSystem?: LearningSystem - ) { + constructor(gateway: LLMGateway, toolRegistry?: ToolRegistry, learningSystem?: LearningSystem) { super( - openai, - model, + gateway, 'correctness', 'Correctness Agent', '业务逻辑正确性、边界条件、空值处理和明显bug', diff --git 
a/src/review/agents/critic-agent.ts b/src/review/agents/critic-agent.ts index 3be61ee..db2220d 100644 --- a/src/review/agents/critic-agent.ts +++ b/src/review/agents/critic-agent.ts @@ -1,7 +1,8 @@ -import OpenAI from 'openai'; -import { logger } from '../../utils/logger'; -import { withGlobalPrompt } from '../../utils/global-prompt'; import config from '../../config'; +import type { LLMGateway } from '../../llm/gateway'; +import type { LLMMessage } from '../../llm/types'; +import { withGlobalPrompt } from '../../utils/global-prompt'; +import { logger } from '../../utils/logger'; import { Finding, ReviewContext } from '../types'; export interface CritiqueResult { @@ -19,10 +20,7 @@ export interface CritiqueIssue { } export class CriticAgent { - constructor( - private openai: OpenAI, - private model: string - ) {} + constructor(private gateway: LLMGateway) {} async critique( findings: Omit[], @@ -71,20 +69,24 @@ ${context.diff.slice(0, 3000)} }`; try { - const response = await this.openai.chat.completions.create({ - model: this.model, + const messages: LLMMessage[] = [ + { + role: 'system', + content: withGlobalPrompt( + '你是严格的代码审查质量评估专家,以高标准评估findings的质量。', + config.review.globalPrompt + ), + }, + { role: 'user', content: prompt }, + ]; + + const response = await this.gateway.chatForRole('specialist', { + messages, temperature: 0.1, // 略高于0以允许批判性思考 - response_format: { type: 'json_object' }, - messages: [ - { - role: 'system', - content: withGlobalPrompt('你是严格的代码审查质量评估专家,以高标准评估findings的质量。', config.openai.globalPrompt), - }, - { role: 'user', content: prompt }, - ], + responseFormat: 'json', }); - const content = response.choices[0]?.message.content; + const content = response.content; if (!content) { throw new Error('Critic Agent返回空结果'); } @@ -160,20 +162,21 @@ ${context.diff.slice(0, 2000)} }`; try { - const response = await this.openai.chat.completions.create({ - model: this.model, + const messages: LLMMessage[] = [ + { + role: 'system', + content: 
withGlobalPrompt('你是代码审查质量评估专家。', config.review.globalPrompt), + }, + { role: 'user', content: prompt }, + ]; + + const response = await this.gateway.chatForRole('specialist', { + messages, temperature: 0, - response_format: { type: 'json_object' }, - messages: [ - { - role: 'system', - content: withGlobalPrompt('你是代码审查质量评估专家。', config.openai.globalPrompt), - }, - { role: 'user', content: prompt }, - ], + responseFormat: 'json', }); - const content = response.choices[0]?.message.content; + const content = response.content; if (!content) { throw new Error('评估失败'); } diff --git a/src/review/agents/debate-orchestrator.ts b/src/review/agents/debate-orchestrator.ts index d861d08..3bb75f6 100644 --- a/src/review/agents/debate-orchestrator.ts +++ b/src/review/agents/debate-orchestrator.ts @@ -1,7 +1,8 @@ -import OpenAI from 'openai'; -import { logger } from '../../utils/logger'; -import { withGlobalPrompt } from '../../utils/global-prompt'; import config from '../../config'; +import type { LLMGateway } from '../../llm/gateway'; +import type { LLMMessage } from '../../llm/types'; +import { withGlobalPrompt } from '../../utils/global-prompt'; +import { logger } from '../../utils/logger'; import { Finding, FindingSeverity } from '../types'; import { SpecialistAgent } from './specialist-agent'; @@ -14,12 +15,10 @@ interface AgentOpinion { } export class DebateOrchestrator { - private openai: OpenAI; - private model: string; + private gateway: LLMGateway; - constructor(openai: OpenAI, model: string) { - this.openai = openai; - this.model = model; + constructor(gateway: LLMGateway) { + this.gateway = gateway; } async conductDebate( @@ -103,20 +102,24 @@ export class DebateOrchestrator { }`; try { - const response = await this.openai.chat.completions.create({ - model: this.model, + const messages: LLMMessage[] = [ + { + role: 'system', + content: withGlobalPrompt( + `你是${agentName},从你的专业角度独立评估代码问题。`, + config.review.globalPrompt + ), + }, + { role: 'user', content: prompt }, + 
]; + + const response = await this.gateway.chatForRole('specialist', { + messages, temperature: 0.2, - response_format: { type: 'json_object' }, - messages: [ - { - role: 'system', - content: withGlobalPrompt(`你是${agentName},从你的专业角度独立评估代码问题。`, config.openai.globalPrompt), - }, - { role: 'user', content: prompt }, - ], + responseFormat: 'json', }); - const content = response.choices[0]?.message.content; + const content = response.content; if (!content) { throw new Error('Agent opinion返回空'); } @@ -180,20 +183,24 @@ ${otherOpinions }`; try { - const response = await this.openai.chat.completions.create({ - model: this.model, + const messages: LLMMessage[] = [ + { + role: 'system', + content: withGlobalPrompt( + `你是${agentName},根据同行意见重新评估,但也要坚持你的专业判断。`, + config.review.globalPrompt + ), + }, + { role: 'user', content: prompt }, + ]; + + const response = await this.gateway.chatForRole('specialist', { + messages, temperature: 0.3, // 允许一定灵活性 - response_format: { type: 'json_object' }, - messages: [ - { - role: 'system', - content: withGlobalPrompt(`你是${agentName},根据同行意见重新评估,但也要坚持你的专业判断。`, config.openai.globalPrompt), - }, - { role: 'user', content: prompt }, - ], + responseFormat: 'json', }); - const content = response.choices[0]?.message.content; + const content = response.content; if (!content) { throw new Error('Revised opinion返回空'); } diff --git a/src/review/agents/maintainability-agent.ts b/src/review/agents/maintainability-agent.ts index d5990f2..f06a449 100644 --- a/src/review/agents/maintainability-agent.ts +++ b/src/review/agents/maintainability-agent.ts @@ -1,18 +1,12 @@ -import OpenAI from 'openai'; +import type { LLMGateway } from '../../llm/gateway'; import type { LearningSystem } from '../learning/learning-system'; import { ToolRegistry } from '../tools/registry'; import { SpecialistAgent } from './specialist-agent'; export class MaintainabilityAgent extends SpecialistAgent { - constructor( - openai: OpenAI, - model: string, - toolRegistry?: ToolRegistry, - 
learningSystem?: LearningSystem - ) { + constructor(gateway: LLMGateway, toolRegistry?: ToolRegistry, learningSystem?: LearningSystem) { super( - openai, - model, + gateway, 'maintainability', 'Maintainability Agent', '可维护性、复杂度、接口破坏风险和可测试性不足', diff --git a/src/review/agents/reflexion-agent.ts b/src/review/agents/reflexion-agent.ts index 72b1cd4..9445c7f 100644 --- a/src/review/agents/reflexion-agent.ts +++ b/src/review/agents/reflexion-agent.ts @@ -1,8 +1,9 @@ import { createHash } from 'node:crypto'; -import OpenAI from 'openai'; -import { logger } from '../../utils/logger'; -import { withGlobalPrompt } from '../../utils/global-prompt'; import config from '../../config'; +import type { LLMGateway } from '../../llm/gateway'; +import type { LLMMessage } from '../../llm/types'; +import { withGlobalPrompt } from '../../utils/global-prompt'; +import { logger } from '../../utils/logger'; import { LearningSystem } from '../learning/learning-system'; import { findingResponseSchema } from '../schema/finding-schema'; import { ToolRegistry } from '../tools/registry'; @@ -21,16 +22,15 @@ export class ReflexionAgent extends SpecialistAgent { private criticAgent: CriticAgent; constructor( - openai: OpenAI, - model: string, + gateway: LLMGateway, category: FindingCategory, agentName: string, focusPrompt: string, toolRegistry?: ToolRegistry, learningSystem?: LearningSystem ) { - super(openai, model, category, agentName, focusPrompt, toolRegistry, learningSystem); - this.criticAgent = new CriticAgent(openai, model); + super(gateway, category, agentName, focusPrompt, toolRegistry, learningSystem); + this.criticAgent = new CriticAgent(gateway); } async reviewWithReflection( @@ -142,20 +142,24 @@ ${context.diff.slice(0, 3000)} }`; try { - const response = await this.openai.chat.completions.create({ - model: this.model, + const messages: LLMMessage[] = [ + { + role: 'system', + content: withGlobalPrompt( + `你是${this.agentName},根据批评反馈改进审查结果。`, + config.review.globalPrompt + ), + }, + { 
role: 'user', content: prompt }, + ]; + + const response = await this.gateway.chatForRole('specialist', { + messages, temperature: 0.1, - response_format: { type: 'json_object' }, - messages: [ - { - role: 'system', - content: withGlobalPrompt(`你是${this.agentName},根据批评反馈改进审查结果。`, config.openai.globalPrompt), - }, - { role: 'user', content: prompt }, - ], + responseFormat: 'json', }); - const content = response.choices[0]?.message.content; + const content = response.content; if (!content) { logger.warn(`${this.agentName} Refine返回空结果,使用原findings`); return draft; diff --git a/src/review/agents/reliability-agent.ts b/src/review/agents/reliability-agent.ts index cb3f715..827ee64 100644 --- a/src/review/agents/reliability-agent.ts +++ b/src/review/agents/reliability-agent.ts @@ -1,18 +1,12 @@ -import OpenAI from 'openai'; +import type { LLMGateway } from '../../llm/gateway'; import type { LearningSystem } from '../learning/learning-system'; import { ToolRegistry } from '../tools/registry'; import { SpecialistAgent } from './specialist-agent'; export class ReliabilityAgent extends SpecialistAgent { - constructor( - openai: OpenAI, - model: string, - toolRegistry?: ToolRegistry, - learningSystem?: LearningSystem - ) { + constructor(gateway: LLMGateway, toolRegistry?: ToolRegistry, learningSystem?: LearningSystem) { super( - openai, - model, + gateway, 'reliability', 'Reliability Agent', '错误处理、重试策略、幂等性、并发一致性和资源释放', diff --git a/src/review/agents/security-agent.ts b/src/review/agents/security-agent.ts index fe4ceca..dfd57dc 100644 --- a/src/review/agents/security-agent.ts +++ b/src/review/agents/security-agent.ts @@ -1,18 +1,12 @@ -import OpenAI from 'openai'; +import type { LLMGateway } from '../../llm/gateway'; import type { LearningSystem } from '../learning/learning-system'; import { ToolRegistry } from '../tools/registry'; import { SpecialistAgent } from './specialist-agent'; export class SecurityAgent extends SpecialistAgent { - constructor( - openai: OpenAI, - model: 
string, - toolRegistry?: ToolRegistry, - learningSystem?: LearningSystem - ) { + constructor(gateway: LLMGateway, toolRegistry?: ToolRegistry, learningSystem?: LearningSystem) { super( - openai, - model, + gateway, 'security', 'Security Agent', '注入漏洞、权限绕过、敏感信息泄露、反序列化和输入校验缺失', diff --git a/src/review/agents/specialist-agent.ts b/src/review/agents/specialist-agent.ts index f979a08..6df7500 100644 --- a/src/review/agents/specialist-agent.ts +++ b/src/review/agents/specialist-agent.ts @@ -1,8 +1,9 @@ import { createHash } from 'node:crypto'; -import OpenAI from 'openai'; -import { logger } from '../../utils/logger'; -import { withGlobalPrompt } from '../../utils/global-prompt'; import config from '../../config'; +import type { LLMGateway } from '../../llm/gateway'; +import type { LLMMessage, LLMToolCall } from '../../llm/types'; +import { withGlobalPrompt } from '../../utils/global-prompt'; +import { logger } from '../../utils/logger'; import type { LearningSystem } from '../learning/learning-system'; import { findingResponseSchema } from '../schema/finding-schema'; import { ToolRegistry } from '../tools/registry'; @@ -90,8 +91,7 @@ function toCompactContext(context: ReviewContext): string { export class SpecialistAgent { constructor( - protected readonly openai: OpenAI, - protected readonly model: string, + protected readonly gateway: LLMGateway, protected readonly category: FindingCategory, protected readonly agentName: string, protected readonly focusPrompt: string, @@ -123,21 +123,24 @@ export class SpecialistAgent { ${toCompactContext(context)}`; try { - const response = await this.openai.chat.completions.create({ - model: this.model, + const messages: LLMMessage[] = [ + { + role: 'system', + content: withGlobalPrompt( + '你是严格的代码审查专家。返回结构化JSON,不输出额外文字。confidence取值范围0到1。line必须是正整数且引用新增行。', + config.review.globalPrompt + ), + }, + { role: 'user', content: prompt }, + ]; + + const response = await this.gateway.chatForRole('specialist', { + messages, temperature: 0, - 
response_format: { type: 'json_object' }, - messages: [ - { - role: 'system', - content: - withGlobalPrompt('你是严格的代码审查专家。返回结构化JSON,不输出额外文字。confidence取值范围0到1。line必须是正整数且引用新增行。', config.openai.globalPrompt), - }, - { role: 'user', content: prompt }, - ], + responseFormat: 'json', }); - const content = response.choices[0]?.message.content; + const content = response.content; if (!content) { return { agentName: this.agentName, findings: [] }; } @@ -166,10 +169,11 @@ ${toCompactContext(context)}`; private async reviewWithReAct(run: ReviewRun, context: ReviewContext): Promise { const maxIterations = 5; const findingsMap = new Map>(); - const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [ + const messages: LLMMessage[] = [ { role: 'system', - content: withGlobalPrompt(`你是${this.agentName},专注于${this.focusPrompt}。 + content: withGlobalPrompt( + `你是${this.agentName},专注于${this.focusPrompt}。 你可以使用以下工具进行深入调查: ${this.toolRegistry!.getAll() @@ -198,7 +202,9 @@ ${this.toolRegistry!.getAll() ], "need_more_investigation": false } -每个 finding 对象的所有字段都是必填的。无问题时返回空数组 {"findings": [], "need_more_investigation": false}。`, config.openai.globalPrompt), +每个 finding 对象的所有字段都是必填的。无问题时返回空数组 {"findings": [], "need_more_investigation": false}。`, + config.review.globalPrompt + ), }, ]; @@ -211,7 +217,22 @@ ${this.toolRegistry!.getAll() run.repo ); if (fewShotExamples.length > 0) { - messages.push(...fewShotExamples); + const llmFewShotExamples = fewShotExamples + .map((msg) => { + if ( + (msg.role === 'system' || msg.role === 'user' || msg.role === 'assistant') && + typeof msg.content === 'string' + ) { + return { role: msg.role, content: msg.content } as const; + } + return null; + }) + .filter( + (msg): msg is { role: 'system' | 'user' | 'assistant'; content: string } => + msg !== null + ); + + messages.push(...llmFewShotExamples); logger.debug(`${this.agentName} 添加了 ${fewShotExamples.length} 条Few-shot示例`, { runId: run.id, }); @@ -239,24 +260,24 @@ ${this.toolRegistry!.getAll() // 
仅在最后一轮迭代强制 JSON 输出(无工具调用时解析结果) // 避免 response_format: json_object 与 tools 参数冲突导致工具不被调用 const isLastIteration = iteration === maxIterations - 1; - const response = await this.openai.chat.completions.create({ - model: this.model, - temperature: 0, - ...(isLastIteration ? { response_format: { type: 'json_object' as const } } : {}), + const response = await this.gateway.chatForRole('specialist', { messages, - tools: this.toolRegistry!.toOpenAIFunctions(), - tool_choice: isLastIteration ? 'none' : 'auto', + temperature: 0, + tools: this.toolRegistry!.toToolDefinitions(), + providerOptions: { tool_choice: isLastIteration ? 'none' : 'auto' }, + responseFormat: isLastIteration ? 'json' : undefined, }); - const choice = response.choices[0]; - if (!choice) break; - // 处理工具调用 - if (choice.message.tool_calls && choice.message.tool_calls.length > 0) { - messages.push(choice.message as OpenAI.Chat.ChatCompletionMessageParam); + if (response.toolCalls.length > 0) { + messages.push({ + role: 'assistant', + content: response.content || '', + toolCalls: response.toolCalls, + }); // 执行所有工具调用 - const toolResults = await this.executeTools(choice.message.tool_calls, { + const toolResults = await this.executeTools(response.toolCalls, { workspacePath: context.workspacePath, mirrorPath: context.mirrorPath, runId: run.id, @@ -266,7 +287,7 @@ ${this.toolRegistry!.getAll() for (const toolResult of toolResults) { messages.push({ role: 'tool', - tool_call_id: toolResult.toolCallId, + toolCallId: toolResult.toolCallId, content: JSON.stringify(toolResult.result || { error: toolResult.error }), }); } @@ -275,9 +296,9 @@ ${this.toolRegistry!.getAll() } // 解析findings(模型选择返回内容而非调用工具) - if (choice.message.content) { + if (response.content) { try { - const parsed = JSON.parse(choice.message.content); + const parsed = JSON.parse(response.content); if (parsed.findings && parsed.findings.length > 0) { // 使用schema验证findings,防止畸形数据流入发布系统 @@ -301,7 +322,10 @@ ${this.toolRegistry!.getAll() } // 
模型要求继续调查但没有调用工具:注入 user 消息打破潜在的自我重复 - messages.push(choice.message as OpenAI.Chat.ChatCompletionMessageParam); + messages.push({ + role: 'assistant', + content: response.content, + }); messages.push({ role: 'user', content: @@ -314,7 +338,10 @@ ${this.toolRegistry!.getAll() runId: run.id, error: parseError instanceof Error ? parseError.message : String(parseError), }); - messages.push(choice.message as OpenAI.Chat.ChatCompletionMessageParam); + messages.push({ + role: 'assistant', + content: response.content, + }); messages.push({ role: 'user', content: @@ -338,28 +365,28 @@ ${this.toolRegistry!.getAll() } private async executeTools( - toolCalls: OpenAI.Chat.ChatCompletionMessageToolCall[], + toolCalls: LLMToolCall[], context: ToolExecutionContext ): Promise { const results: ToolResult[] = []; for (const toolCall of toolCalls) { - const tool = this.toolRegistry!.get(toolCall.function.name); + const tool = this.toolRegistry!.get(toolCall.name); if (!tool) { results.push({ toolCallId: toolCall.id, success: false, - error: `工具 ${toolCall.function.name} 未找到`, + error: `工具 ${toolCall.name} 未找到`, }); continue; } try { - const params = JSON.parse(toolCall.function.arguments); + const params = JSON.parse(toolCall.arguments); const result = await tool.execute(params, context); - logger.info(`工具调用成功: ${toolCall.function.name}`, { + logger.info(`工具调用成功: ${toolCall.name}`, { runId: context.runId, params, }); @@ -370,7 +397,7 @@ ${this.toolRegistry!.getAll() result, }); } catch (error) { - logger.error(`工具调用失败: ${toolCall.function.name}`, { + logger.error(`工具调用失败: ${toolCall.name}`, { runId: context.runId, error: error instanceof Error ? 
error.message : String(error), }); diff --git a/src/review/learning/learning-system.ts b/src/review/learning/learning-system.ts index 5be7120..164f9da 100644 --- a/src/review/learning/learning-system.ts +++ b/src/review/learning/learning-system.ts @@ -1,4 +1,4 @@ -import OpenAI from 'openai'; +import type { LLMMessage } from '../../llm/types'; import config from '../../config'; import { logger } from '../../utils/logger'; import { VectorMemoryStore } from '../memory/vector-store'; @@ -59,7 +59,7 @@ export class LearningSystem { category: FindingCategory, owner?: string, repo?: string - ): Promise { + ): Promise { const targetCount = config.review.fewShotExamplesCount; // 提前检查:如果few-shot被禁用(targetCount=0),直接返回,避免无意义的向量查询 @@ -105,7 +105,7 @@ export class LearningSystem { ); } - const examples: OpenAI.Chat.ChatCompletionMessageParam[] = []; + const examples: LLMMessage[] = []; const negativeCount = Math.floor(targetCount * 0.4); diff --git a/src/review/memory/vector-store.ts b/src/review/memory/vector-store.ts index d44ef10..90bcc0f 100644 --- a/src/review/memory/vector-store.ts +++ b/src/review/memory/vector-store.ts @@ -1,18 +1,16 @@ import { QdrantClient } from '@qdrant/js-client-rest'; -import OpenAI from 'openai'; +import { llmGateway } from '../../llm/gateway'; import { logger } from '../../utils/logger'; import { Finding } from '../types'; import { MemoryEntry, MemorySearchResult } from './types'; export class VectorMemoryStore { private client: QdrantClient; - private openai: OpenAI; private collectionName = 'code_review_memory'; private initialized = false; - constructor(qdrantUrl: string, openaiClient: OpenAI) { + constructor(qdrantUrl: string) { this.client = new QdrantClient({ url: qdrantUrl }); - this.openai = openaiClient; } async initialize(): Promise { @@ -47,7 +45,7 @@ export class VectorMemoryStore { async storeMemory(entry: MemoryEntry): Promise { await this.initialize(); - const embedding = await this.getEmbedding(entry.content); + const 
[embedding] = await this.getEmbedding([entry.content]); await this.client.upsert(this.collectionName, { points: [ @@ -73,7 +71,7 @@ async searchSimilar(query: string, limit = 5, filter?: any): Promise { await this.initialize(); - const queryEmbedding = await this.getEmbedding(query); + const [queryEmbedding] = await this.getEmbedding([query]); const results = await this.client.search(this.collectionName, { vector: queryEmbedding, @@ -101,14 +99,9 @@ })); } - private async getEmbedding(text: string): Promise { + private async getEmbedding(texts: string[]): Promise { try { - const response = await this.openai.embeddings.create({ - model: 'text-embedding-3-small', - input: text.slice(0, 8000), // 限制长度防止超出token限制 - }); - - return response.data[0].embedding; + return await llmGateway.embedForRole(texts.map((text) => text.slice(0, 8000))); // 限制长度防止超出token限制;await确保catch能捕获异步拒绝 } catch (error) { logger.error('生成embedding失败', { error: error instanceof Error ? 
error.message : String(error), diff --git a/src/review/orchestrator.ts b/src/review/orchestrator.ts index fddfd53..913e7df 100644 --- a/src/review/orchestrator.ts +++ b/src/review/orchestrator.ts @@ -1,6 +1,6 @@ import { randomUUID } from 'node:crypto'; -import OpenAI from 'openai'; import config from '../config'; +import { LLMGateway, llmGateway } from '../llm/gateway'; import { giteaService } from '../services/gitea'; import { logger } from '../utils/logger'; import { DebateOrchestrator } from './agents/debate-orchestrator'; @@ -42,7 +42,7 @@ function summarizeGatedCount(gatedCount: number): string { } export class ReviewOrchestrator { - private readonly openai: OpenAI; + private readonly gateway: LLMGateway; private readonly toolRegistry: ToolRegistry; private readonly correctnessAgent: ReflexionAgent; private readonly securityAgent: ReflexionAgent; @@ -58,10 +58,7 @@ export class ReviewOrchestrator { private readonly localRepoManager: LocalRepoManager, private readonly diffExtractor: DiffExtractor ) { - this.openai = new OpenAI({ - baseURL: config.openai.baseUrl, - apiKey: config.openai.apiKey, - }); + this.gateway = llmGateway; // 初始化工具注册表 this.toolRegistry = new ToolRegistry(); @@ -75,7 +72,7 @@ export class ReviewOrchestrator { // 初始化记忆和学习系统(可选) if (config.review.qdrantUrl && config.review.enableMemory) { - this.memoryStore = new VectorMemoryStore(config.review.qdrantUrl, this.openai); + this.memoryStore = new VectorMemoryStore(config.review.qdrantUrl); this.learningSystem = new LearningSystem(this.memoryStore, this.store); this.memoryStore.initialize().catch((err) => { @@ -87,8 +84,7 @@ export class ReviewOrchestrator { // 创建Reflexion-wrapped agents并传递工具注册表和学习系统 this.correctnessAgent = new ReflexionAgent( - this.openai, - config.review.modelSpecialist, + this.gateway, 'correctness', 'Correctness Agent', '业务逻辑正确性、边界条件、空值处理和明显bug', @@ -97,8 +93,7 @@ export class ReviewOrchestrator { ); this.securityAgent = new ReflexionAgent( - this.openai, - 
config.review.modelSpecialist, + this.gateway, 'security', 'Security Agent', '注入漏洞、权限绕过、敏感信息泄露、反序列化和输入校验缺失', @@ -107,8 +102,7 @@ export class ReviewOrchestrator { ); this.reliabilityAgent = new ReflexionAgent( - this.openai, - config.review.modelSpecialist, + this.gateway, 'reliability', 'Reliability Agent', '错误处理、重试策略、幂等性、并发一致性和资源释放', @@ -117,8 +111,7 @@ export class ReviewOrchestrator { ); this.maintainabilityAgent = new ReflexionAgent( - this.openai, - config.review.modelSpecialist, + this.gateway, 'maintainability', 'Maintainability Agent', '可维护性、复杂度、接口破坏风险和可测试性不足', @@ -127,7 +120,7 @@ export class ReviewOrchestrator { ); this.judgeAgent = new JudgeAgent(); - this.debateOrchestrator = new DebateOrchestrator(this.openai, config.review.modelSpecialist); + this.debateOrchestrator = new DebateOrchestrator(this.gateway); } async execute(run: ReviewRun): Promise { diff --git a/src/review/tools/registry.ts b/src/review/tools/registry.ts index 066baca..373cdaf 100644 --- a/src/review/tools/registry.ts +++ b/src/review/tools/registry.ts @@ -1,6 +1,7 @@ import { z } from 'zod'; import zodToJsonSchema from 'zod-to-json-schema'; import type { JsonSchema7Type } from 'zod-to-json-schema'; +import type { LLMToolDefinition } from '../../llm/types'; import { Tool } from './types'; export class ToolRegistry { @@ -30,6 +31,14 @@ export class ToolRegistry { })); } + toToolDefinitions(): LLMToolDefinition[] { + return this.getAll().map((tool) => ({ + name: tool.name, + description: tool.description, + parameters: this.zodToJsonSchema(tool.parameters) as Record, + })); + } + private zodToJsonSchema(schema: z.ZodTypeAny): JsonSchema7Type { /** * 使用zod-to-json-schema库转换Zod schema为JSON Schema。 diff --git a/src/services/ai-review.ts b/src/services/ai-review.ts index b9f2fb0..b5fd680 100644 --- a/src/services/ai-review.ts +++ b/src/services/ai-review.ts @@ -1,15 +1,10 @@ -import OpenAI from 'openai'; import config from '../config'; -import { logger } from '../utils/logger'; +import { 
llmGateway } from '../llm/gateway'; +import { LLMMessage } from '../llm/types'; import { withGlobalPrompt } from '../utils/global-prompt'; +import { logger } from '../utils/logger'; import { PullRequestFile, giteaService } from './gitea'; -// 创建OpenAI客户端 -const openai = new OpenAI({ - baseURL: config.openai.baseUrl, - apiKey: config.openai.apiKey, -}); - // 代码审查结果接口 export interface CodeReviewResult { summary: string; @@ -198,23 +193,26 @@ export const aiReviewService = { 请根据以上信息,特别是考虑每个文件的完整内容和上下文,提供代码审查评价。如果没有发现明显问题,请简短说明代码质量良好即可。`; - const summaryPrompt = config.openai.customSummaryPrompt || defaultSummaryPrompt; + const summaryPrompt = config.review.customSummaryPrompt || defaultSummaryPrompt; // 获取总体评价 - const summaryResponse = await openai.chat.completions.create({ - model: config.openai.model, - messages: [ - { - role: 'system', - content: - withGlobalPrompt('你是一个专业的代码审查助手,擅长识别代码中的严重问题和bug。你会查看代码的完整上下文,而不是为了评论而评论。如无明显问题,应给予简短肯定。', config.openai.globalPrompt), - }, - { role: 'user', content: summaryPrompt }, - ], + const messages: LLMMessage[] = [ + { + role: 'system', + content: withGlobalPrompt( + '你是一个专业的代码审查助手,擅长识别代码中的严重问题和bug。你会查看代码的完整上下文,而不是为了评论而评论。如无明显问题,应给予简短肯定。', + config.review.globalPrompt + ), + }, + { role: 'user', content: summaryPrompt }, + ]; + + const summaryResponse = await llmGateway.chatForRole('legacy', { + messages, temperature: 0.1, }); - const summary = summaryResponse.choices[0]?.message.content || '无法生成代码审查摘要'; + const summary = summaryResponse.content || '无法生成代码审查摘要'; return summary; } catch (error: any) { logger.error('生成总体评价失败:', error); @@ -270,24 +268,27 @@ export const aiReviewService = { ] 只返回JSON数组,不要有其他文本。`; - const filePrompt = config.openai.customLineCommentPrompt || defaultFilePrompt; + const filePrompt = config.review.customLineCommentPrompt || defaultFilePrompt; // 获取行级评论 - const lineResponse = await openai.chat.completions.create({ - model: config.openai.model, - messages: [ - { - role: 'system', - content: - 
withGlobalPrompt('你是一个谨慎的代码审查助手,只对有明显bug或严重问题的代码行提供评论。大多数情况下,如果代码没有严重问题,你应该返回空数组。请以JSON格式返回结果。', config.openai.globalPrompt), - }, - { role: 'user', content: filePrompt }, - ], + const messages: LLMMessage[] = [ + { + role: 'system', + content: withGlobalPrompt( + '你是一个谨慎的代码审查助手,只对有明显bug或严重问题的代码行提供评论。大多数情况下,如果代码没有严重问题,你应该返回空数组。请以JSON格式返回结果。', + config.review.globalPrompt + ), + }, + { role: 'user', content: filePrompt }, + ]; + + const lineResponse = await llmGateway.chatForRole('legacy', { + messages, temperature: 0.1, - response_format: { type: 'json_object' }, + responseFormat: 'json', }); - const content = lineResponse.choices[0]?.message.content; + const content = lineResponse.content; if (!content) continue; try {