From fdfd49be63a2344fe69722a36562481496979c8a Mon Sep 17 00:00:00 2001
From: jeffusion
Date: Thu, 5 Mar 2026 23:18:33 +0800
Subject: [PATCH] refactor(ui): use tokenlens as sole model source, remove
 provider listModels
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Remove the per-provider listModels API (GET /providers/:id/models) along
with all four provider implementations (OpenAI Compatible, OpenAI
Responses, Anthropic, Gemini).

ModelCombobox now shows only tokenlens suggestions (tagged '推荐',
"recommended") plus free-form custom input (tagged '自定义', "custom"); it
no longer mixes in unfiltered 'API' models fetched through provider SDKs.

Fixes: switching the provider type in ProviderDialog no longer shows stale
models fetched from the original provider's API.

Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-opencode)
---
 frontend/src/components/llm/ModelCombobox.tsx | 45 +++-------
 .../src/components/llm/ProviderDialog.tsx     |  1 -
 .../src/components/llm/RoleAssignment.tsx     |  1 -
 .../llm/__tests__/ModelCombobox.test.tsx      | 40 +++++-----
 .../llm/__tests__/RoleAssignment.test.tsx     |  7 ++-
 frontend/src/services/llmProviderService.ts   |  5 ---
 src/controllers/llm-config.ts                 | 23 --------
 src/llm/providers/anthropic.ts                |  9 ----
 src/llm/providers/base.ts                     |  2 -
 src/llm/providers/gemini.ts                   | 11 -----
 src/llm/providers/openai-compatible.ts        |  5 ---
 src/llm/providers/openai-responses.ts         |  5 ---
 12 files changed, 29 insertions(+), 125 deletions(-)

diff --git a/frontend/src/components/llm/ModelCombobox.tsx b/frontend/src/components/llm/ModelCombobox.tsx
index 8db5c64..21e5c36 100644
--- a/frontend/src/components/llm/ModelCombobox.tsx
+++ b/frontend/src/components/llm/ModelCombobox.tsx
@@ -1,11 +1,10 @@
 import { useState, useRef, useEffect } from 'react';
 import { Input } from '@/components/ui/input';
 import { useQuery } from '@tanstack/react-query';
-import { fetchModels, fetchModelSuggestions } from '@/services/llmProviderService';
+import { fetchModelSuggestions } from '@/services/llmProviderService';
 import type { ProviderType } from '@/services/llmProviderService';
 
 interface ModelComboboxProps {
-  providerId?: string | null;
   providerType?: ProviderType;
   value: string;
   onChange: (model: string) => void;
@@ -15,7 +14,6 @@ interface ModelComboboxProps {
 }
 
 export function ModelCombobox({
-  providerId,
   providerType,
   value,
   onChange,
@@ -32,15 +30,6 @@ export function ModelCombobox({
     setInputValue(value);
   }, [value]);
 
-  const { data: fetchedModels = [], isLoading } = useQuery({
-    queryKey: ['llm-models', providerId, providerType],
-    queryFn: () => {
-      if (providerId) return fetchModels(providerId);
-      return Promise.resolve([]);
-    },
-    enabled: !!providerId,
-    staleTime: 5 * 60 * 1000,
-  });
 
   // Fetch dynamic model suggestions from backend (powered by models.dev)
   const { data: suggestions = {} } = useQuery({
     queryKey: ['llm-model-suggestions'],
     queryFn: fetchModelSuggestions,
     staleTime: 30 * 60 * 1000, // 30 min cache
   });
 
-  // Build tagged model list: API > suggestions > custom input
-  const useApiFetched = fetchedModels.length > 0;
+  // Build model list: suggestions > custom input
   const suggestionModels = providerType ? suggestions[providerType] || [] : [];
 
-  type TaggedModel = { name: string; tag: 'API' | '推荐' | '自定义' };
+  type TaggedModel = { name: string; tag: '推荐' | '自定义' };
 
   const trimmedInput = inputValue.trim().toLowerCase();
 
@@ -61,23 +49,10 @@ export function ModelCombobox({
     const result: TaggedModel[] = [];
     const seen = new Set();
 
-    // API models first
-    if (useApiFetched) {
-      for (const m of fetchedModels) {
-        if (m.toLowerCase().includes(trimmedInput)) {
-          result.push({ name: m, tag: 'API' });
-          seen.add(m.toLowerCase());
-        }
-      }
-    }
-
-    // Suggestion models (only show when no API results, or as supplement)
-    if (!useApiFetched) {
-      for (const m of suggestionModels) {
-        if (!seen.has(m.toLowerCase()) && m.toLowerCase().includes(trimmedInput)) {
-          result.push({ name: m, tag: '推荐' });
-          seen.add(m.toLowerCase());
-        }
+    for (const m of suggestionModels) {
+      if (!seen.has(m.toLowerCase()) && m.toLowerCase().includes(trimmedInput)) {
+        result.push({ name: m, tag: '推荐' });
+        seen.add(m.toLowerCase());
       }
     }
 
@@ -92,7 +67,6 @@
   const taggedModels = buildTaggedList();
 
   const TAG_STYLES: Record<string, string> = {
-    'API': 'bg-emerald-500/15 text-emerald-400',
     '推荐': 'bg-blue-500/15 text-blue-400',
     '自定义': 'bg-amber-500/15 text-amber-400',
   };
@@ -132,11 +106,6 @@
           autoComplete="off"
           className="bg-zinc-900 border-white/10 text-white w-full pr-10"
         />
-        {isLoading && (
-          <div ...>
-            <div ... />
-          </div>
-        )}
 
         {isOpen && !disabled && taggedModels.length > 0 && (
diff --git a/frontend/src/components/llm/ProviderDialog.tsx b/frontend/src/components/llm/ProviderDialog.tsx
index b576824..950e725 100644
--- a/frontend/src/components/llm/ProviderDialog.tsx
+++ b/frontend/src/components/llm/ProviderDialog.tsx
@@ -134,7 +134,6 @@ function ProviderDialogInner({ onOpenChange, provider }: Omit
-            providerId={provider?.id}
diff --git a/frontend/src/components/llm/RoleAssignment.tsx b/frontend/src/components/llm/RoleAssignment.tsx
--- a/frontend/src/components/llm/RoleAssignment.tsx
+++ b/frontend/src/components/llm/RoleAssignment.tsx
-            providerId={state.providerId}
             providerType={providers.find((p) => p.id === state.providerId)?.type}
             value={state.model}
             onChange={(model) => handleModelChange(role, model)}
diff --git a/frontend/src/components/llm/__tests__/ModelCombobox.test.tsx b/frontend/src/components/llm/__tests__/ModelCombobox.test.tsx
index 72e8c63..110c8ae 100644
--- a/frontend/src/components/llm/__tests__/ModelCombobox.test.tsx
+++ b/frontend/src/components/llm/__tests__/ModelCombobox.test.tsx
@@ -4,13 +4,11 @@ import userEvent from '@testing-library/user-event';
 import type { ReactNode } from 'react';
 import { describe, expect, it, vi } from 'vitest';
 import { ModelCombobox } from '../ModelCombobox';
-import { fetchModels } from '@/services/llmProviderService';
 
 vi.mock('@/services/llmProviderService', async () => {
   const actual = await vi.importActual('@/services/llmProviderService');
   return {
     ...actual,
-    fetchModels: vi.fn(),
     fetchModelSuggestions: vi.fn().mockResolvedValue({
       openai_compatible: ['gpt-4o', 'gpt-4o-mini', 'deepseek-chat'],
       openai_responses: ['gpt-4o', 'gpt-4o-mini', 'o3-mini'],
@@ -30,32 +28,12 @@ function renderWithQuery(ui: ReactNode) {
 }
 
 describe('ModelCombobox', () => {
-  it('shows API tag and selects API model', async () => {
-    vi.mocked(fetchModels).mockResolvedValueOnce(['api-model-1']);
+  it('shows 推荐 models matching providerType and supports custom input', async () => {
     const user = userEvent.setup();
     const onChange = vi.fn();
 
     renderWithQuery(
-      <ModelCombobox providerId="p1" value="" onChange={onChange} />,
-    );
-
-    const input = screen.getByPlaceholderText('选择或输入模型...');
-    await user.click(input);
-
-    expect(await screen.findByText('api-model-1')).toBeInTheDocument();
-    expect(screen.getByText('API')).toBeInTheDocument();
-
-    await user.click(screen.getByText('api-model-1'));
-    expect(onChange).toHaveBeenCalledWith('api-model-1');
-  });
-
-  it('shows 推荐 and 自定义 tags and supports custom input', async () => {
-    vi.mocked(fetchModels).mockResolvedValueOnce([]);
-    const user = userEvent.setup();
-    const onChange = vi.fn();
-
-    renderWithQuery(
-      <ModelCombobox providerId="p1" providerType="openai_compatible" value="" onChange={onChange} />,
+      <ModelCombobox providerType="openai_compatible" value="" onChange={onChange} />,
     );
 
     const input = screen.getByPlaceholderText('选择或输入模型...');
@@ -74,4 +52,18 @@
     expect(onChange).toHaveBeenCalledWith('my-custom-model');
     });
   });
+
+  it('shows different models when providerType changes', async () => {
+    const onChange = vi.fn();
+
+    renderWithQuery(
+      <ModelCombobox providerType="anthropic" value="" onChange={onChange} />,
+    );
+
+    const input = screen.getByPlaceholderText('选择或输入模型...');
+    await userEvent.click(input);
+
+    expect(await screen.findByText('claude-sonnet-4-20250514')).toBeInTheDocument();
+    expect(screen.queryByText('gpt-4o')).not.toBeInTheDocument();
+  });
 });
diff --git a/frontend/src/components/llm/__tests__/RoleAssignment.test.tsx b/frontend/src/components/llm/__tests__/RoleAssignment.test.tsx
index 1ece5f3..03861ed 100644
--- a/frontend/src/components/llm/__tests__/RoleAssignment.test.tsx
+++ b/frontend/src/components/llm/__tests__/RoleAssignment.test.tsx
@@ -20,7 +20,12 @@ vi.mock('@/services/llmProviderService', async () => {
     fetchProviders: vi.fn(),
     fetchRoles: vi.fn(),
     setRole: vi.fn(),
-    fetchModels: vi.fn().mockResolvedValue([]),
+    fetchModelSuggestions: vi.fn().mockResolvedValue({
+      openai_compatible: ['gpt-4o', 'gpt-4o-mini'],
+      openai_responses: ['gpt-4o', 'gpt-4o-mini'],
+      anthropic: ['claude-sonnet-4-20250514'],
+      gemini: ['gemini-2.5-pro'],
+    }),
   };
 });
diff --git a/frontend/src/services/llmProviderService.ts b/frontend/src/services/llmProviderService.ts
index ec0eaca..375fe2e 100644
--- a/frontend/src/services/llmProviderService.ts
+++ b/frontend/src/services/llmProviderService.ts
@@ -89,8 +89,3 @@ export const testProvider = async (id: string): Promise => {
   const response = await api.post(`/llm/providers/${id}/test`);
   return response.data;
 };
-
-export const fetchModels = async (id: string): Promise<string[]> => {
-  const response = await api.get<{ models: string[] }>(`/llm/providers/${id}/models`);
-  return response.data.models;
-};
diff --git a/src/controllers/llm-config.ts b/src/controllers/llm-config.ts
index 366bc9c..bb40ea8 100644
--- a/src/controllers/llm-config.ts
+++ b/src/controllers/llm-config.ts
@@ -55,29 +55,6 @@ llmConfigRouter.get('/providers/:id', (c) => {
   });
 });
 
-llmConfigRouter.get('/providers/:id/models', async (c) => {
-  const id = c.req.param('id');
-  const provider = providerRepo.getById(id);
-  if (!provider) return c.json({ message: 'Provider not found' }, 404);
-
-  if (!secretRepo.has(id)) {
-    return c.json({ message: 'No API key configured' }, 400);
-  }
-
-  try {
-    llmGateway.invalidateProvider(id);
-    const providerInstance = llmGateway.getProviderInstance(id);
-    if (!providerInstance.listModels) {
-      return c.json({ message: 'This provider does not support listing models' }, 501);
-    }
-
-    const models = await providerInstance.listModels();
-    return c.json({ models });
-  } catch (error: any) {
-    return c.json({ message: error.message || 'Failed to fetch models' }, 500);
-  }
-});
-
 llmConfigRouter.post('/providers', async (c) => {
   const body = await c.req.json<{
     name: string;
diff --git a/src/llm/providers/anthropic.ts b/src/llm/providers/anthropic.ts
index 78adf4d..56b00d1 100644
--- a/src/llm/providers/anthropic.ts
+++ b/src/llm/providers/anthropic.ts
@@ -220,15 +220,6 @@ class AnthropicProvider implements LLMProvider {
     }
   }
 
-  async listModels(): Promise<string[]> {
-    const response = await this.client.models.list();
-    const models: string[] = [];
-    for await (const page of response.iterPages()) {
-      models.push(...page.data.map((model) => model.id));
-    }
-    return models;
-  }
-
   private wrapError(error: unknown): Error {
     if (error instanceof Anthropic.APIError) {
       if (error.status === 401) return new LLMAuthError(TYPE, error.message);
diff --git a/src/llm/providers/base.ts b/src/llm/providers/base.ts
index 73fc093..960ee8c 100644
--- a/src/llm/providers/base.ts
+++ b/src/llm/providers/base.ts
@@ -33,8 +33,6 @@ export interface LLMProvider {
 
   /** Optional: embedding interface (only for providers that support it). */
   embed?(texts: string[]): Promise;
-  /** Optional: list available models for this provider. */
-  listModels?(): Promise<string[]>;
 }
 
 // ---------------------------------------------------------------------------
diff --git a/src/llm/providers/gemini.ts b/src/llm/providers/gemini.ts
index 1c6a30d..2429046 100644
--- a/src/llm/providers/gemini.ts
+++ b/src/llm/providers/gemini.ts
@@ -203,17 +203,6 @@ class GeminiProvider implements LLMProvider {
     }
   }
 
-  async listModels(): Promise<string[]> {
-    const pager = await this.genAI.models.list();
-    const models: string[] = [];
-    for await (const model of pager) {
-      if (model.name) {
-        models.push(model.name.replace('models/', ''));
-      }
-    }
-    return models;
-  }
-
   private wrapError(error: unknown): Error {
     if (error instanceof Error) {
       const msg = error.message;
diff --git a/src/llm/providers/openai-compatible.ts b/src/llm/providers/openai-compatible.ts
index e40ff60..ebe57f3 100644
--- a/src/llm/providers/openai-compatible.ts
+++ b/src/llm/providers/openai-compatible.ts
@@ -143,11 +143,6 @@ class OpenAICompatibleProvider implements LLMProvider {
     }
   }
 
-  async listModels(): Promise<string[]> {
-    const response = await this.client.models.list();
-    return response.data.map((model) => model.id);
-  }
-
   private wrapError(error: unknown): Error {
     if (error instanceof OpenAI.APIError) {
       if (error.status === 401) return new LLMAuthError(TYPE, error.message);
diff --git a/src/llm/providers/openai-responses.ts b/src/llm/providers/openai-responses.ts
index 26a6079..88425d8 100644
--- a/src/llm/providers/openai-responses.ts
+++ b/src/llm/providers/openai-responses.ts
@@ -182,11 +182,6 @@ class OpenAIResponsesProvider implements LLMProvider {
     }
   }
 
-  async listModels(): Promise<string[]> {
-    const response = await this.client.models.list();
-    return response.data.map((model) => model.id);
-  }
-
   private wrapError(error: unknown): Error {
     if (error instanceof OpenAI.APIError) {
       if (error.status === 401) return new LLMAuthError(TYPE, error.message);
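
-- 
A minimal sketch of the call-site shape after this change, for reviewers.
ModelCombobox's props come from the diff above; the surrounding component
and state names here are illustrative only:

    import { useState } from 'react';
    import { ModelCombobox } from '@/components/llm/ModelCombobox';
    import type { ProviderType } from '@/services/llmProviderService';

    // Consumers now pass only the provider *type* (no providerId); the
    // combobox pulls its '推荐' (recommended) list from the cached
    // fetchModelSuggestions query and accepts any typed value as
    // '自定义' (custom) input.
    function ModelPicker({ providerType }: { providerType?: ProviderType }) {
      const [model, setModel] = useState('');
      return (
        <ModelCombobox
          providerType={providerType}
          value={model}
          onChange={setModel}
        />
      );
    }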