feat(config): add global prompt setting injected into all LLM calls

Add a GLOBAL_PROMPT config field whose value is appended to the system
message of every LLM call across all 9 call sites (legacy engine, agent
specialist, reflexion, critic, and debate orchestrator).

Configured via the admin dashboard (auto-rendered from CONFIG_FIELDS metadata)
or the GLOBAL_PROMPT env var. Example use: "请始终使用中文回复" ("always reply
in Chinese").
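
A minimal sketch of the resulting composition (prompt text and config field
taken from this diff; the relative import paths vary per call site and are
illustrative):

    import config from '../config';
    import { withGlobalPrompt } from '../utils/global-prompt';

    // With GLOBAL_PROMPT="请始终使用中文回复", the wrapped system message becomes
    // "你是代码审查质量评估专家。\n\n请始终使用中文回复"; when GLOBAL_PROMPT is
    // unset, the original content is returned unchanged.
    const systemContent = withGlobalPrompt('你是代码审查质量评估专家。', config.openai.globalPrompt);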

Changes:
- Add GLOBAL_PROMPT to Zod schema, AppConfig interface, and buildConfig
- Add CONFIG_FIELDS metadata (group: openai, type: text)
- Add getEffectiveValue switch case
- Add withGlobalPrompt() helper in src/utils/global-prompt.ts
- Inject into all LLM call sites via withGlobalPrompt wrapper

Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-opencode)
Author: jeffusion · 2026-03-04 17:30:31 +08:00 · committed by 路遥知码力
Commit: afd568588d (parent: 98e5048f2c)
9 changed files with 40 additions and 7 deletions

View File

@@ -28,6 +28,7 @@ const SCHEMA_KEYS = [
'OPENAI_MODEL',
'CUSTOM_SUMMARY_PROMPT',
'CUSTOM_LINE_COMMENT_PROMPT',
'GLOBAL_PROMPT',
'FEISHU_WEBHOOK_URL',
'FEISHU_WEBHOOK_SECRET',
'PORT',

View File

@@ -46,6 +46,7 @@ const envSchema = z.object({
OPENAI_MODEL: z.string().default('gpt-4o-mini'),
CUSTOM_SUMMARY_PROMPT: z.string().optional(),
CUSTOM_LINE_COMMENT_PROMPT: z.string().optional(),
GLOBAL_PROMPT: z.string().optional(),
// Feishu
FEISHU_WEBHOOK_URL: z.preprocess(
@@ -118,6 +119,7 @@ export interface AppConfig {
model: string;
customSummaryPrompt: string | undefined;
customLineCommentPrompt: string | undefined;
globalPrompt: string | undefined;
};
feishu: {
webhookUrl: string | undefined;
@@ -271,6 +273,7 @@ class ConfigManager {
model: env.OPENAI_MODEL,
customSummaryPrompt: env.CUSTOM_SUMMARY_PROMPT,
customLineCommentPrompt: env.CUSTOM_LINE_COMMENT_PROMPT,
globalPrompt: env.GLOBAL_PROMPT,
},
feishu: {
webhookUrl: env.FEISHU_WEBHOOK_URL,

View File

@@ -159,6 +159,14 @@ export const CONFIG_FIELDS: ConfigFieldMeta[] = [
type: 'text',
sensitive: false,
},
{
envKey: 'GLOBAL_PROMPT',
group: 'openai',
label: '全局提示词',
description: '附加到所有 LLM 调用的系统提示词中(例如:"请始终使用中文回复")',
type: 'text',
sensitive: false,
},
// ── 飞书 ────────────────────────────────────────────────────────────────
{

View File

@@ -50,6 +50,8 @@ function getEffectiveValue(
return current.openai.customSummaryPrompt;
case 'CUSTOM_LINE_COMMENT_PROMPT':
return current.openai.customLineCommentPrompt;
case 'GLOBAL_PROMPT':
return current.openai.globalPrompt;
// Feishu
case 'FEISHU_WEBHOOK_URL':
return current.feishu.webhookUrl;

View File

@@ -1,5 +1,7 @@
import OpenAI from 'openai';
import { logger } from '../../utils/logger';
import { withGlobalPrompt } from '../../utils/global-prompt';
import config from '../../config';
import { Finding, ReviewContext } from '../types';
export interface CritiqueResult {
@@ -76,7 +78,7 @@ ${context.diff.slice(0, 3000)}
messages: [
{
role: 'system',
content: '你是严格的代码审查质量评估专家以高标准评估findings的质量。',
content: withGlobalPrompt('你是严格的代码审查质量评估专家以高标准评估findings的质量。', config.openai.globalPrompt),
},
{ role: 'user', content: prompt },
],
@@ -165,7 +167,7 @@ ${context.diff.slice(0, 2000)}
messages: [
{
role: 'system',
content: '你是代码审查质量评估专家。',
content: withGlobalPrompt('你是代码审查质量评估专家。', config.openai.globalPrompt),
},
{ role: 'user', content: prompt },
],

View File

@@ -1,5 +1,7 @@
import OpenAI from 'openai';
import { logger } from '../../utils/logger';
import { withGlobalPrompt } from '../../utils/global-prompt';
import config from '../../config';
import { Finding, FindingSeverity } from '../types';
import { SpecialistAgent } from './specialist-agent';
@@ -108,7 +110,7 @@ export class DebateOrchestrator {
messages: [
{
role: 'system',
content: `你是${agentName},从你的专业角度独立评估代码问题。`,
content: withGlobalPrompt(`你是${agentName},从你的专业角度独立评估代码问题。`, config.openai.globalPrompt),
},
{ role: 'user', content: prompt },
],
@@ -185,7 +187,7 @@ ${otherOpinions
messages: [
{
role: 'system',
content: `你是${agentName},根据同行意见重新评估,但也要坚持你的专业判断。`,
content: withGlobalPrompt(`你是${agentName},根据同行意见重新评估,但也要坚持你的专业判断。`, config.openai.globalPrompt),
},
{ role: 'user', content: prompt },
],

View File

@@ -1,6 +1,8 @@
import { createHash } from 'node:crypto';
import OpenAI from 'openai';
import { logger } from '../../utils/logger';
import { withGlobalPrompt } from '../../utils/global-prompt';
import config from '../../config';
import { LearningSystem } from '../learning/learning-system';
import { findingResponseSchema } from '../schema/finding-schema';
import { ToolRegistry } from '../tools/registry';
@@ -147,7 +149,7 @@ ${context.diff.slice(0, 3000)}
messages: [
{
role: 'system',
content: `你是${this.agentName},根据批评反馈改进审查结果。`,
content: withGlobalPrompt(`你是${this.agentName},根据批评反馈改进审查结果。`, config.openai.globalPrompt),
},
{ role: 'user', content: prompt },
],

View File

@@ -1,6 +1,7 @@
import OpenAI from 'openai';
import config from '../config';
import { logger } from '../utils/logger';
import { withGlobalPrompt } from '../utils/global-prompt';
import { PullRequestFile, giteaService } from './gitea';
// 创建OpenAI客户端
@@ -206,7 +207,7 @@ export const aiReviewService = {
{
role: 'system',
content:
'你是一个专业的代码审查助手擅长识别代码中的严重问题和bug。你会查看代码的完整上下文而不是为了评论而评论。如无明显问题应给予简短肯定。',
withGlobalPrompt('你是一个专业的代码审查助手擅长识别代码中的严重问题和bug。你会查看代码的完整上下文而不是为了评论而评论。如无明显问题应给予简短肯定。', config.openai.globalPrompt),
},
{ role: 'user', content: summaryPrompt },
],
@@ -278,7 +279,7 @@ export const aiReviewService = {
{
role: 'system',
content:
'你是一个谨慎的代码审查助手只对有明显bug或严重问题的代码行提供评论。大多数情况下如果代码没有严重问题你应该返回空数组。请以JSON格式返回结果。',
withGlobalPrompt('你是一个谨慎的代码审查助手只对有明显bug或严重问题的代码行提供评论。大多数情况下如果代码没有严重问题你应该返回空数组。请以JSON格式返回结果。', config.openai.globalPrompt),
},
{ role: 'user', content: filePrompt },
],

View File

@@ -0,0 +1,12 @@
/**
* Helper to inject the global prompt into LLM system messages.
*
* If globalPrompt is non-empty, it is appended to the original system content
* separated by a blank line. Otherwise the original content is returned as-is.
*/
export function withGlobalPrompt(systemContent: string, globalPrompt: string | undefined): string {
if (!globalPrompt || globalPrompt.trim() === '') {
return systemContent;
}
return `${systemContent}\n\n${globalPrompt}`;
}
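
Illustrative usage of the helper (values chosen for demonstration; the real
call sites are shown in the hunks above):

    withGlobalPrompt('你是代码审查质量评估专家。', '请始终使用中文回复');
    // => '你是代码审查质量评估专家。\n\n请始终使用中文回复'

    withGlobalPrompt('你是代码审查质量评估专家。', undefined);
    // => '你是代码审查质量评估专家。'

    withGlobalPrompt('你是代码审查质量评估专家。', '   ');
    // => '你是代码审查质量评估专家。' (whitespace-only prompts are ignored)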