diff --git a/frontend/src/components/llm/ModelCombobox.tsx b/frontend/src/components/llm/ModelCombobox.tsx index 6c89370..8db5c64 100644 --- a/frontend/src/components/llm/ModelCombobox.tsx +++ b/frontend/src/components/llm/ModelCombobox.tsx @@ -1,7 +1,7 @@ import { useState, useRef, useEffect } from 'react'; import { Input } from '@/components/ui/input'; import { useQuery } from '@tanstack/react-query'; -import { fetchModels, MODEL_SUGGESTIONS } from '@/services/llmProviderService'; +import { fetchModels, fetchModelSuggestions } from '@/services/llmProviderService'; import type { ProviderType } from '@/services/llmProviderService'; interface ModelComboboxProps { @@ -42,9 +42,16 @@ export function ModelCombobox({ staleTime: 5 * 60 * 1000, }); + // Fetch dynamic model suggestions from backend (powered by models.dev) + const { data: suggestions = {} } = useQuery({ + queryKey: ['llm-model-suggestions'], + queryFn: fetchModelSuggestions, + staleTime: 30 * 60 * 1000, // 30 min cache + }); + // Build tagged model list: API > suggestions > custom input const useApiFetched = fetchedModels.length > 0; - const suggestionModels = providerType ? MODEL_SUGGESTIONS[providerType] || [] : []; + const suggestionModels = providerType ? 
suggestions[providerType] || [] : []; type TaggedModel = { name: string; tag: 'API' | '推荐' | '自定义' }; diff --git a/frontend/src/components/llm/__tests__/ModelCombobox.test.tsx b/frontend/src/components/llm/__tests__/ModelCombobox.test.tsx index 25ab733..72e8c63 100644 --- a/frontend/src/components/llm/__tests__/ModelCombobox.test.tsx +++ b/frontend/src/components/llm/__tests__/ModelCombobox.test.tsx @@ -11,6 +11,12 @@ vi.mock('@/services/llmProviderService', async () => { return { ...actual, fetchModels: vi.fn(), + fetchModelSuggestions: vi.fn().mockResolvedValue({ + openai_compatible: ['gpt-4o', 'gpt-4o-mini', 'deepseek-chat'], + openai_responses: ['gpt-4o', 'gpt-4o-mini', 'o3-mini'], + anthropic: ['claude-sonnet-4-20250514', 'claude-3-5-haiku-20241022'], + gemini: ['gemini-2.5-pro', 'gemini-2.5-flash', 'gemini-2.0-flash'], + }), }; }); diff --git a/frontend/src/services/llmProviderService.ts b/frontend/src/services/llmProviderService.ts index 4b7118e..ec0eaca 100644 --- a/frontend/src/services/llmProviderService.ts +++ b/frontend/src/services/llmProviderService.ts @@ -31,13 +31,23 @@ export interface TestResult { error?: string; } -export const MODEL_SUGGESTIONS: Record<ProviderType, string[]> = { - openai_compatible: ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo', 'deepseek-chat', 'qwen-plus'], - openai_responses: ['gpt-4o', 'gpt-4o-mini', 'gpt-4.1', 'gpt-4.1-mini', 'o3-mini'], - anthropic: ['claude-sonnet-4-20250514', 'claude-3-5-haiku-20241022', 'claude-opus-4-20250514'], +/** Fallback suggestions when API is unavailable (e.g. catalog not loaded yet). 
*/ +const FALLBACK_SUGGESTIONS: Record<ProviderType, string[]> = { + openai_compatible: ['gpt-4o', 'gpt-4o-mini', 'deepseek-chat'], + openai_responses: ['gpt-4o', 'gpt-4o-mini', 'o3-mini'], + anthropic: ['claude-sonnet-4-20250514', 'claude-3-5-haiku-20241022'], gemini: ['gemini-2.5-pro', 'gemini-2.5-flash', 'gemini-2.0-flash'], }; +export const fetchModelSuggestions = async (): Promise<Record<ProviderType, string[]>> => { + try { + const response = await api.get<Record<ProviderType, string[]>>('/llm/model-suggestions'); + return response.data; + } catch { + return FALLBACK_SUGGESTIONS; + } +}; + export const fetchProviders = async (): Promise<Provider[]> => { const response = await api.get('/llm/providers'); return response.data; diff --git a/src/controllers/llm-config.ts b/src/controllers/llm-config.ts index 8e5c703..366bc9c 100644 --- a/src/controllers/llm-config.ts +++ b/src/controllers/llm-config.ts @@ -14,6 +14,7 @@ import { secretRepo } from '../db/repositories/secret-repo'; import { settingsRepo } from '../db/repositories/settings-repo'; import { llmGateway } from '../llm/gateway'; import { MODEL_ROLES } from '../llm/types'; +import { tokenCounter } from '../review/context/token-counter'; export const llmConfigRouter = new Hono(); @@ -289,6 +290,39 @@ llmConfigRouter.post('/providers/:id/test', async (c) => { } }); +// ── Model Suggestions (from models.dev via tokenlens) ─────────────────── + +/** + * Map our ProviderType to models.dev provider keys. + * openai_compatible is special: it aggregates multiple providers since + * users often point compatible endpoints at DeepSeek, Qwen, etc. 
+ */ +const PROVIDER_TYPE_TO_CATALOG_KEYS: Record<string, string[]> = { + openai_compatible: ['openai', 'deepseek', 'qwen'], + openai_responses: ['openai'], + anthropic: ['anthropic'], + gemini: ['google'], +}; + +llmConfigRouter.get('/model-suggestions', async (c) => { + // Ensure catalog is loaded (lazy init on first request) + if (!tokenCounter.hasCatalog) { + await tokenCounter.refreshCatalog(); + } + + const result: Record<string, string[]> = {}; + + for (const [providerType, catalogKeys] of Object.entries(PROVIDER_TYPE_TO_CATALOG_KEYS)) { + const models: string[] = []; + for (const key of catalogKeys) { + models.push(...tokenCounter.getModelSuggestions(key)); + } + result[providerType] = models; + } + + return c.json(result); +}); + // ── System Settings ───────────────────────────────────────────────────── llmConfigRouter.get('/settings', (c) => {