refactor(ui): use tokenlens as sole model source, remove provider listModels

Remove the per-provider listModels API (GET /providers/:id/models) and all
four provider implementations (OpenAI Compatible, OpenAI Responses, Anthropic,
Gemini). ModelCombobox now only shows tokenlens suggestions (tagged '推荐',
i.e. "recommended") plus free-form custom input — no more unfiltered 'API'
models fetched live from provider SDKs.

Fixes: switching provider type in ProviderDialog no longer shows stale models
from the original provider's API.

Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-opencode)
This commit is contained in:
jeffusion
2026-03-05 23:18:33 +08:00
committed by 路遥知码力
parent 71bd310459
commit fdfd49be63
12 changed files with 29 additions and 125 deletions

View File

@@ -1,11 +1,10 @@
import { useState, useRef, useEffect } from 'react';
import { Input } from '@/components/ui/input';
import { useQuery } from '@tanstack/react-query';
import { fetchModels, fetchModelSuggestions } from '@/services/llmProviderService';
import { fetchModelSuggestions } from '@/services/llmProviderService';
import type { ProviderType } from '@/services/llmProviderService';
interface ModelComboboxProps {
providerId?: string | null;
providerType?: ProviderType;
value: string;
onChange: (model: string) => void;
@@ -15,7 +14,6 @@ interface ModelComboboxProps {
}
export function ModelCombobox({
providerId,
providerType,
value,
onChange,
@@ -32,15 +30,6 @@ export function ModelCombobox({
setInputValue(value);
}, [value]);
const { data: fetchedModels = [], isLoading } = useQuery({
queryKey: ['llm-models', providerId, providerType],
queryFn: () => {
if (providerId) return fetchModels(providerId);
return Promise.resolve([]);
},
enabled: !!providerId,
staleTime: 5 * 60 * 1000,
});
// Fetch dynamic model suggestions from backend (powered by models.dev)
const { data: suggestions = {} } = useQuery({
@@ -49,11 +38,10 @@ export function ModelCombobox({
staleTime: 30 * 60 * 1000, // 30 min cache
});
// Build tagged model list: API > suggestions > custom input
const useApiFetched = fetchedModels.length > 0;
// Build model list: suggestions > custom input
const suggestionModels = providerType ? suggestions[providerType] || [] : [];
type TaggedModel = { name: string; tag: 'API' | '推荐' | '自定义' };
type TaggedModel = { name: string; tag: '推荐' | '自定义' };
const trimmedInput = inputValue.trim().toLowerCase();
@@ -61,23 +49,10 @@ export function ModelCombobox({
const result: TaggedModel[] = [];
const seen = new Set<string>();
// API models first
if (useApiFetched) {
for (const m of fetchedModels) {
if (m.toLowerCase().includes(trimmedInput)) {
result.push({ name: m, tag: 'API' });
seen.add(m.toLowerCase());
}
}
}
// Suggestion models (only show when no API results, or as supplement)
if (!useApiFetched) {
for (const m of suggestionModels) {
if (!seen.has(m.toLowerCase()) && m.toLowerCase().includes(trimmedInput)) {
result.push({ name: m, tag: '推荐' });
seen.add(m.toLowerCase());
}
for (const m of suggestionModels) {
if (!seen.has(m.toLowerCase()) && m.toLowerCase().includes(trimmedInput)) {
result.push({ name: m, tag: '推荐' });
seen.add(m.toLowerCase());
}
}
@@ -92,7 +67,6 @@ export function ModelCombobox({
const taggedModels = buildTaggedList();
const TAG_STYLES: Record<string, string> = {
'API': 'bg-emerald-500/15 text-emerald-400',
'推荐': 'bg-blue-500/15 text-blue-400',
'自定义': 'bg-amber-500/15 text-amber-400',
};
@@ -132,11 +106,6 @@ export function ModelCombobox({
autoComplete="off"
className="bg-zinc-900 border-white/10 text-white w-full pr-10"
/>
{isLoading && (
<div className="absolute right-3 top-1/2 -translate-y-1/2">
<div className="w-4 h-4 border-2 border-primary/30 border-t-primary rounded-full animate-spin" />
</div>
)}
</div>
{isOpen && !disabled && taggedModels.length > 0 && (

View File

@@ -134,7 +134,6 @@ function ProviderDialogInner({ onOpenChange, provider }: Omit<ProviderDialogProp
<div className="space-y-2">
<Label htmlFor="defaultModel"> <span className="text-red-500">*</span></Label>
<ModelCombobox
providerId={provider?.hasKey ? provider.id : null}
providerType={type}
value={defaultModel}
onChange={setDefaultModel}

View File

@@ -182,7 +182,6 @@ export function RoleAssignment() {
<div className="flex-1 w-full space-y-1">
<Label className="text-xs text-zinc-400">使</Label>
<ModelCombobox
providerId={state.providerId}
providerType={providers.find(p => p.id === state.providerId)?.type}
value={state.model}
onChange={(model) => handleModelChange(role, model)}

View File

@@ -4,13 +4,11 @@ import userEvent from '@testing-library/user-event';
import type { ReactNode } from 'react';
import { describe, expect, it, vi } from 'vitest';
import { ModelCombobox } from '../ModelCombobox';
import { fetchModels } from '@/services/llmProviderService';
vi.mock('@/services/llmProviderService', async () => {
const actual = await vi.importActual<typeof import('@/services/llmProviderService')>('@/services/llmProviderService');
return {
...actual,
fetchModels: vi.fn(),
fetchModelSuggestions: vi.fn().mockResolvedValue({
openai_compatible: ['gpt-4o', 'gpt-4o-mini', 'deepseek-chat'],
openai_responses: ['gpt-4o', 'gpt-4o-mini', 'o3-mini'],
@@ -30,32 +28,12 @@ function renderWithQuery(ui: ReactNode) {
}
describe('ModelCombobox', () => {
it('shows API tag and selects API model', async () => {
vi.mocked(fetchModels).mockResolvedValueOnce(['api-model-1']);
it('shows 推荐 models matching providerType and supports custom input', async () => {
const user = userEvent.setup();
const onChange = vi.fn();
renderWithQuery(
<ModelCombobox providerId="p1" providerType="openai_compatible" value="" onChange={onChange} />,
);
const input = screen.getByPlaceholderText('选择或输入模型...');
await user.click(input);
expect(await screen.findByText('api-model-1')).toBeInTheDocument();
expect(screen.getByText('API')).toBeInTheDocument();
await user.click(screen.getByText('api-model-1'));
expect(onChange).toHaveBeenCalledWith('api-model-1');
});
it('shows 推荐 and 自定义 tags and supports custom input', async () => {
vi.mocked(fetchModels).mockResolvedValueOnce([]);
const user = userEvent.setup();
const onChange = vi.fn();
renderWithQuery(
<ModelCombobox providerId="p2" providerType="openai_compatible" value="" onChange={onChange} />,
<ModelCombobox providerType="openai_compatible" value="" onChange={onChange} />,
);
const input = screen.getByPlaceholderText('选择或输入模型...');
@@ -74,4 +52,18 @@ describe('ModelCombobox', () => {
expect(onChange).toHaveBeenCalledWith('my-custom-model');
});
});
// Regression test for the ProviderDialog bug described in the commit message:
// suggestions must be keyed by providerType, so an 'anthropic' provider shows
// Claude models and none of the OpenAI-compatible ones leak through.
it('shows different models when providerType changes', async () => {
const onChange = vi.fn();
renderWithQuery(
<ModelCombobox providerType="anthropic" value="" onChange={onChange} />,
);
// Opening the input (click) is what triggers the suggestion dropdown.
const input = screen.getByPlaceholderText('选择或输入模型...');
await userEvent.click(input);
// Model names come from the fetchModelSuggestions mock at the top of this file.
expect(await screen.findByText('claude-sonnet-4-20250514')).toBeInTheDocument();
expect(screen.queryByText('gpt-4o')).not.toBeInTheDocument();
});
});

View File

@@ -20,7 +20,12 @@ vi.mock('@/services/llmProviderService', async () => {
fetchProviders: vi.fn(),
fetchRoles: vi.fn(),
setRole: vi.fn(),
fetchModels: vi.fn().mockResolvedValue([]),
fetchModelSuggestions: vi.fn().mockResolvedValue({
openai_compatible: ['gpt-4o', 'gpt-4o-mini'],
openai_responses: ['gpt-4o', 'gpt-4o-mini'],
anthropic: ['claude-sonnet-4-20250514'],
gemini: ['gemini-2.5-pro'],
}),
};
});

View File

@@ -89,8 +89,3 @@ export const testProvider = async (id: string): Promise<TestResult> => {
const response = await api.post<TestResult>(`/llm/providers/${id}/test`);
return response.data;
};
/**
 * Fetch the model ids a configured provider exposes.
 * Calls GET /llm/providers/:id/models and unwraps the `models` array.
 */
export const fetchModels = async (id: string): Promise<string[]> => {
  const { data } = await api.get<{ models: string[] }>(`/llm/providers/${id}/models`);
  return data.models;
};

View File

@@ -55,29 +55,6 @@ llmConfigRouter.get('/providers/:id', (c) => {
});
});
llmConfigRouter.get('/providers/:id/models', async (c) => {
const id = c.req.param('id');
const provider = providerRepo.getById(id);
if (!provider) return c.json({ message: 'Provider not found' }, 404);
if (!secretRepo.has(id)) {
return c.json({ message: 'No API key configured' }, 400);
}
try {
llmGateway.invalidateProvider(id);
const providerInstance = llmGateway.getProviderInstance(id);
if (!providerInstance.listModels) {
return c.json({ message: 'This provider does not support listing models' }, 501);
}
const models = await providerInstance.listModels();
return c.json({ models });
} catch (error: any) {
return c.json({ message: error.message || 'Failed to fetch models' }, 500);
}
});
llmConfigRouter.post('/providers', async (c) => {
const body = await c.req.json<{
name: string;

View File

@@ -220,15 +220,6 @@ class AnthropicProvider implements LLMProvider {
}
}
/** Enumerate every model id visible to this API key, walking all result pages. */
async listModels(): Promise<string[]> {
  const ids: string[] = [];
  const listing = await this.client.models.list();
  // The Anthropic SDK returns an auto-paginating page object; iterate each page
  // rather than only the first page of results.
  for await (const page of listing.iterPages()) {
    for (const model of page.data) {
      ids.push(model.id);
    }
  }
  return ids;
}
private wrapError(error: unknown): Error {
if (error instanceof Anthropic.APIError) {
if (error.status === 401) return new LLMAuthError(TYPE, error.message);

View File

@@ -33,8 +33,6 @@ export interface LLMProvider {
/** Optional: embedding interface (only for providers that support it). */
embed?(texts: string[]): Promise<number[][]>;
/** Optional: list available models for this provider. */
listModels?(): Promise<string[]>;
}
// ---------------------------------------------------------------------------

View File

@@ -203,17 +203,6 @@ class GeminiProvider implements LLMProvider {
}
}
/** List model ids from the Gemini API, stripping the 'models/' resource prefix. */
async listModels(): Promise<string[]> {
  const names: string[] = [];
  const pager = await this.genAI.models.list();
  for await (const model of pager) {
    // Entries without a name field are skipped.
    if (!model.name) continue;
    names.push(model.name.replace('models/', ''));
  }
  return names;
}
private wrapError(error: unknown): Error {
if (error instanceof Error) {
const msg = error.message;

View File

@@ -143,11 +143,6 @@ class OpenAICompatibleProvider implements LLMProvider {
}
}
/** Return the ids of all models the endpoint reports via its /models listing. */
async listModels(): Promise<string[]> {
  const { data } = await this.client.models.list();
  return data.map(({ id }) => id);
}
private wrapError(error: unknown): Error {
if (error instanceof OpenAI.APIError) {
if (error.status === 401) return new LLMAuthError(TYPE, error.message);

View File

@@ -182,11 +182,6 @@ class OpenAIResponsesProvider implements LLMProvider {
}
}
/** Model ids as reported by the OpenAI models endpoint. */
async listModels(): Promise<string[]> {
  const listing = await this.client.models.list();
  return listing.data.map((m) => m.id);
}
private wrapError(error: unknown): Error {
if (error instanceof OpenAI.APIError) {
if (error.status === 401) return new LLMAuthError(TYPE, error.message);