mirror of
https://github.com/hicccc77/WeFlow.git
synced 2026-05-16 15:10:36 +00:00
Merge pull request #966 from Jasonzhu1207/main
feat: Disable AI Output Truncation & Optimize max_tokens Settings
This commit is contained in:
@@ -225,7 +225,7 @@ export class ConfigService {
|
||||
aiModelApiBaseUrl: '',
|
||||
aiModelApiKey: '',
|
||||
aiModelApiModel: 'gpt-4o-mini',
|
||||
- aiModelApiMaxTokens: 200,
+ aiModelApiMaxTokens: 1024,
|
||||
aiInsightEnabled: false,
|
||||
aiInsightApiBaseUrl: '',
|
||||
aiInsightApiKey: '',
|
||||
|
||||
@@ -36,9 +36,9 @@ const SILENCE_SCAN_INITIAL_DELAY_MS = 3 * 60 * 1000
|
||||
|
||||
/** 单次 API 请求超时(毫秒) */
|
||||
const API_TIMEOUT_MS = 45_000
|
||||
- const API_MAX_TOKENS_DEFAULT = 200
+ const API_MAX_TOKENS_DEFAULT = 1024
|
||||
const API_MAX_TOKENS_MIN = 1
|
||||
- const API_MAX_TOKENS_MAX = 65_535
+ const API_MAX_TOKENS_MAX = 2_000_000
|
||||
const API_TEMPERATURE = 0.7
|
||||
const INSIGHT_NOTIFICATION_AVATAR_URL = './assets/insight/AI_Insight.png'
|
||||
|
||||
@@ -582,7 +582,7 @@ ${topMentionText}
|
||||
25_000,
|
||||
maxTokens
|
||||
)
|
||||
- const insight = result.trim().slice(0, 400)
+ const insight = result.trim()
|
||||
if (!insight) return { success: false, message: '模型返回为空' }
|
||||
return { success: true, message: '生成成功', insight }
|
||||
} catch (error) {
|
||||
@@ -1214,7 +1214,7 @@ ${topMentionText}
|
||||
}
|
||||
if (!this.isEnabled()) return
|
||||
|
||||
- const insight = result.slice(0, 120)
+ const insight = result.trim()
|
||||
const notifTitle = `见解 · ${resolvedDisplayName}`
|
||||
const recordLog: InsightRecordLog = {
|
||||
endpoint,
|
||||
|
||||
@@ -292,7 +292,7 @@ function SettingsPage({ onClose }: SettingsPageProps = {}) {
|
||||
const [aiModelApiBaseUrl, setAiModelApiBaseUrl] = useState('')
|
||||
const [aiModelApiKey, setAiModelApiKey] = useState('')
|
||||
const [aiModelApiModel, setAiModelApiModel] = useState('gpt-4o-mini')
|
||||
- const [aiModelApiMaxTokens, setAiModelApiMaxTokens] = useState(200)
+ const [aiModelApiMaxTokens, setAiModelApiMaxTokens] = useState(1024)
|
||||
const [aiInsightSilenceDays, setAiInsightSilenceDays] = useState(3)
|
||||
const [aiInsightAllowContext, setAiInsightAllowContext] = useState(false)
|
||||
const [aiInsightAllowMomentsContext, setAiInsightAllowMomentsContext] = useState(false)
|
||||
@@ -3030,18 +3030,18 @@ function SettingsPage({ onClose }: SettingsPageProps = {}) {
|
||||
<div className="form-group">
|
||||
<label>通用 Max Tokens</label>
|
||||
<span className="form-hint">
|
||||
- 设置单次请求的最大输出 token 数量,见解与足迹共享该值。默认 <code>200</code>。
+ 设置单次请求的最大输出 token 数量,见解与足迹共享该值。默认 <code>1024</code>。
|
||||
</span>
|
||||
<input
|
||||
type="number"
|
||||
className="field-input"
|
||||
value={aiModelApiMaxTokens}
|
||||
min={1}
|
||||
- max={65535}
+ max={2000000}
|
||||
step={1}
|
||||
onChange={(e) => {
|
||||
const parsed = parseInt(e.target.value, 10)
|
||||
- const val = Math.min(65535, Math.max(1, Number.isFinite(parsed) ? parsed : 200))
+ const val = Math.min(2000000, Math.max(1, Number.isFinite(parsed) ? parsed : 1024))
|
||||
setAiModelApiMaxTokens(val)
|
||||
scheduleConfigSave('aiModelApiMaxTokens', () => configService.setAiModelApiMaxTokens(val))
|
||||
}}
|
||||
|
||||
@@ -1903,13 +1903,13 @@ export async function getAiModelApiMaxTokens(): Promise<number> {
|
||||
if (typeof value === 'number' && Number.isFinite(value) && value > 0) {
|
||||
return Math.floor(value)
|
||||
}
|
||||
- return 200
+ return 1024
|
||||
}
|
||||
|
||||
export async function setAiModelApiMaxTokens(maxTokens: number): Promise<void> {
|
||||
const normalized = Number.isFinite(maxTokens)
|
||||
- ? Math.min(65535, Math.max(1, Math.floor(maxTokens)))
- : 200
+ ? Math.min(2000000, Math.max(1, Math.floor(maxTokens)))
+ : 1024
|
||||
await config.set(CONFIG_KEYS.AI_MODEL_API_MAX_TOKENS, normalized)
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user