From 35d529c56fe10513c0cdb34af05b6d5d39520ecc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1vid=20Balatoni?= Date: Fri, 20 Feb 2026 17:29:37 +0100 Subject: [PATCH 01/18] feat(opencode): add LiteLLM discovery module with /model/info and /models fallback --- packages/opencode/src/provider/litellm.ts | 251 ++++++++++++++++++++++ 1 file changed, 251 insertions(+) create mode 100644 packages/opencode/src/provider/litellm.ts diff --git a/packages/opencode/src/provider/litellm.ts b/packages/opencode/src/provider/litellm.ts new file mode 100644 index 000000000000..958da8f82a7f --- /dev/null +++ b/packages/opencode/src/provider/litellm.ts @@ -0,0 +1,251 @@ +import { Log } from "../util/log" +import { Env } from "../env" +import type { Provider } from "./provider" + +export namespace LiteLLM { + const log = Log.create({ service: "litellm" }) + + interface ModelInfoEntry { + model_name: string + litellm_params?: { + model?: string + [key: string]: unknown + } + model_info?: { + id?: string + input_cost_per_token?: number | null + output_cost_per_token?: number | null + cache_read_input_token_cost?: number | null + cache_creation_input_token_cost?: number | null + input_cost_per_token_above_200k_tokens?: number | null + output_cost_per_token_above_200k_tokens?: number | null + max_tokens?: number | null + max_input_tokens?: number | null + max_output_tokens?: number | null + supports_function_calling?: boolean | null + supports_vision?: boolean | null + supports_pdf_input?: boolean | null + supports_audio_input?: boolean | null + supports_audio_output?: boolean | null + supports_video_input?: boolean | null + supports_prompt_caching?: boolean | null + supports_reasoning?: boolean | null + supported_openai_params?: string[] | null + [key: string]: unknown + } + } + + const INTERLEAVED_MODELS = ["claude", "anthropic"] + + function isWildcard(name: string): boolean { + return name.includes("*") || name.includes("/*") + } + + function inferInterleaved( + underlyingModel: string | undefined, + ): Provider.Model["capabilities"]["interleaved"] { + if (!underlyingModel) return false + const lower = underlyingModel.toLowerCase() + if (INTERLEAVED_MODELS.some((m) => lower.includes(m))) return true + return false + } + + function costPerMillion(costPerToken: number | null | undefined): number { + if (!costPerToken) return 0 + return costPerToken * 1_000_000 + } + + function toModel(entry: ModelInfoEntry): Provider.Model | undefined { + if (isWildcard(entry.model_name)) return undefined + + const info = entry.model_info ?? {} + const underlyingModel = entry.litellm_params?.model + + const inputCost = costPerMillion(info.input_cost_per_token) + const outputCost = costPerMillion(info.output_cost_per_token) + const cacheReadCost = costPerMillion(info.cache_read_input_token_cost) + const cacheWriteCost = costPerMillion(info.cache_creation_input_token_cost) + + const hasOver200K = + info.input_cost_per_token_above_200k_tokens != null || + info.output_cost_per_token_above_200k_tokens != null + + const supportsVision = info.supports_vision === true + const supportsPdf = info.supports_pdf_input === true + const supportsTemperature = info.supported_openai_params?.includes("temperature") ?? 
true
+
+    return {
+      id: entry.model_name,
+      providerID: "litellm",
+      name: entry.model_name,
+      api: {
+        id: entry.model_name,
+        url: "",
+        npm: "@ai-sdk/openai-compatible",
+      },
+      status: "active",
+      headers: {},
+      options: {},
+      cost: {
+        input: inputCost,
+        output: outputCost,
+        cache: {
+          read: cacheReadCost,
+          write: cacheWriteCost,
+        },
+        experimentalOver200K: hasOver200K
+          ? {
+              input: costPerMillion(info.input_cost_per_token_above_200k_tokens),
+              output: costPerMillion(info.output_cost_per_token_above_200k_tokens),
+              cache: { read: 0, write: 0 },
+            }
+          : undefined,
+      },
+      limit: {
+        context: (info.max_input_tokens ?? info.max_tokens ?? 128_000) as number,
+        output: (info.max_output_tokens ?? 8_192) as number,
+      },
+      capabilities: {
+        temperature: supportsTemperature,
+        reasoning: info.supports_reasoning === true,
+        attachment: supportsVision || supportsPdf,
+        toolcall: info.supports_function_calling !== false,
+        input: {
+          text: true,
+          audio: info.supports_audio_input === true,
+          image: supportsVision,
+          video: info.supports_video_input === true,
+          pdf: supportsPdf,
+        },
+        output: {
+          text: true,
+          audio: info.supports_audio_output === true,
+          image: false,
+          video: false,
+          pdf: false,
+        },
+        interleaved: inferInterleaved(underlyingModel),
+      },
+      release_date: "",
+      variants: {},
+    }
+  }
+
+  function toBasicModel(id: string): Provider.Model {
+    return {
+      id,
+      providerID: "litellm",
+      name: id,
+      api: { id, url: "", npm: "@ai-sdk/openai-compatible" },
+      status: "active",
+      headers: {},
+      options: {},
+      cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
+      limit: { context: 128_000, output: 8_192 },
+      capabilities: {
+        temperature: true,
+        reasoning: false,
+        attachment: false,
+        toolcall: true,
+        input: { text: true, audio: false, image: false, video: false, pdf: false },
+        output: { text: true, audio: false, image: false, video: false, pdf: false },
+        interleaved: false,
+      },
+      release_date: "",
+      variants: {},
+    }
+  }
+
+  async function fetchModelInfo(
+    host: string,
+    headers: Record<string, string>,
+    timeout: number,
+  ): Promise<Record<string, Provider.Model> | undefined> {
+    const url = `${host}/model/info`
+    const response = await fetch(url, {
+      headers,
+      signal: AbortSignal.timeout(timeout),
+    }).catch(() => undefined)
+
+    if (!response?.ok) return undefined
+
+    const data = (await response.json()) as { data?: ModelInfoEntry[] }
+    const entries = data?.data
+    if (!Array.isArray(entries)) return undefined
+
+    const models: Record<string, Provider.Model> = {}
+    for (const entry of entries) {
+      const model = toModel(entry)
+      if (model) models[model.id] = model
+    }
+    return Object.keys(models).length > 0 ? models : undefined
+  }
+
+  async function fetchModelList(
+    host: string,
+    headers: Record<string, string>,
+    timeout: number,
+  ): Promise<Record<string, Provider.Model>> {
+    const url = `${host}/models`
+    const response = await fetch(url, {
+      headers,
+      signal: AbortSignal.timeout(timeout),
+    }).catch(() => undefined)
+
+    if (!response?.ok) return {}
+
+    const data = (await response.json()) as { data?: { id: string }[] }
+    const models: Record<string, Provider.Model> = {}
+    for (const item of data?.data ?? []) {
+      if (!item.id) continue
+      models[item.id] = toBasicModel(item.id)
+    }
+    return models
+  }
+
+  export async function discover(
+    host: string,
+    options?: {
+      apiKey?: string
+      headers?: Record<string, string>
+      timeout?: number
+    },
+  ): Promise<Record<string, Provider.Model> | undefined> {
+    const timeout = options?.timeout ?? Number(Env.get("LITELLM_TIMEOUT") ?? "5000")
+    const base = host.replace(/\/+$/, "")
+
+    const headers: Record<string, string> = {
+      "Content-Type": "application/json",
+      ...options?.headers,
+    }
+    if (options?.apiKey) {
+      headers["Authorization"] = `Bearer ${options.apiKey}`
+    }
+
+    try {
+      // Try /model/info first for rich metadata, fall back to /models
+      const rich = await fetchModelInfo(base, headers, timeout)
+      if (rich) {
+        log.info("discovered models from LiteLLM /model/info", {
+          count: Object.keys(rich).length,
+          host,
+        })
+        return rich
+      }
+
+      const basic = await fetchModelList(base, headers, timeout)
+      if (Object.keys(basic).length > 0) {
+        log.info("discovered models from /models (fallback)", {
+          count: Object.keys(basic).length,
+          host,
+        })
+        return basic
+      }
+
+      return undefined
+    } catch (e) {
+      log.warn("LiteLLM model discovery error", { error: e, host })
+      return undefined
+    }
+  }
+}

From f7cbcee47dfc5a61fcb832f7e98363213abb72bb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C3=A1vid=20Balatoni?=
Date: Fri, 20 Feb 2026 17:30:16 +0100
Subject: [PATCH 02/18] feat(opencode): add litellm provider seeding and custom loader

---
 packages/opencode/src/provider/provider.ts | 77 ++++++++++++++++++++++
 1 file changed, 77 insertions(+)

diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts
index 022ec3167956..b8d5b5e334a8 100644
--- a/packages/opencode/src/provider/provider.ts
+++ b/packages/opencode/src/provider/provider.ts
@@ -44,6 +44,7 @@ import { fromNodeProviderChain } from "@aws-sdk/credential-providers"
 import { GoogleAuth } from "google-auth-library"
 import { ProviderTransform } from "./transform"
 import { Installation } from "../installation"
+import { LiteLLM } from "./litellm"
 
 export namespace Provider {
   const log = Log.create({ service: "provider" })
@@ -588,6 +589,67 @@ export namespace Provider {
         },
       }
     },
+    litellm: async (provider) => {
+      const config = await Config.get()
+      const providerConfig = config.provider?.["litellm"]
+
+      const baseURL =
+        providerConfig?.options?.baseURL ??
+        Env.get("LITELLM_HOST") ??
+        Env.get("LITELLM_BASE_URL") ??
+        "http://localhost:4000"
+
+      const apiKey = await (async () => {
+        if (providerConfig?.options?.apiKey) return providerConfig.options.apiKey
+        const envKey = Env.get("LITELLM_API_KEY")
+        if (envKey) return envKey
+        const auth = await Auth.get("litellm")
+        if (auth?.type === "api") return auth.key
+        return undefined
+      })()
+
+      const customHeaders = iife(() => {
+        const raw = Env.get("LITELLM_CUSTOM_HEADERS")
+        if (!raw) return {}
+        try {
+          return JSON.parse(raw) as Record<string, string>
+        } catch {
+          return {}
+        }
+      })
+
+      const timeout = Number(Env.get("LITELLM_TIMEOUT") ?? 
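Reviewer note (PATCH 01): a quick way to exercise the discovery module in isolation.
A minimal sketch, assuming a LiteLLM proxy on localhost:4000 — the key and header
values below are illustrative placeholders, not part of the patch:

    import { LiteLLM } from "./packages/opencode/src/provider/litellm"

    const models = await LiteLLM.discover("http://localhost:4000", {
      apiKey: "sk-litellm-placeholder",   // omit when the proxy is unauthenticated
      headers: { "x-team": "platform" },  // merged into every discovery request
      timeout: 5_000,                     // ms; defaults to LITELLM_TIMEOUT, then "5000"
    })
    // undefined means both /model/info and /models were empty or unreachable;
    // otherwise this is a map of model ID -> fully populated Provider.Model.
    console.log(Object.keys(models ?? {}))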
"5000") + + const discovered = await LiteLLM.discover(baseURL, { + apiKey, + headers: customHeaders, + timeout, + }) + + if (discovered) { + for (const [modelID, model] of Object.entries(discovered)) { + if (!provider.models[modelID]) { + provider.models[modelID] = model + } + } + } + + const hasModels = Object.keys(provider.models).length > 0 + if (!hasModels) return { autoload: false } + + return { + autoload: true, + options: { + baseURL, + apiKey, + litellmProxy: true, + ...customHeaders, + }, + async getModel(sdk: any, modelID: string) { + return sdk.languageModel(modelID) + }, + } + }, } export const Model = z @@ -794,6 +856,21 @@ export namespace Provider { } } + // Seed LiteLLM provider when env vars exist but no entry in database + if ( + !database["litellm"] && + (Env.get("LITELLM_API_KEY") || Env.get("LITELLM_HOST") || Env.get("LITELLM_BASE_URL")) + ) { + database["litellm"] = { + id: "litellm", + name: "LiteLLM", + env: ["LITELLM_API_KEY"], + options: {}, + source: "custom", + models: {}, + } + } + function mergeProvider(providerID: string, provider: Partial) { const existing = providers[providerID] if (existing) { From 2a81a940df01f3ab944cccada6cc05ed8bc0c1f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1vid=20Balatoni?= Date: Fri, 20 Feb 2026 17:30:31 +0100 Subject: [PATCH 03/18] feat(opencode): add litellm-specific reasoning transform variants --- packages/opencode/src/provider/transform.ts | 23 +++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts index b659799c1b6f..58cbd91930dc 100644 --- a/packages/opencode/src/provider/transform.ts +++ b/packages/opencode/src/provider/transform.ts @@ -363,6 +363,29 @@ export namespace ProviderTransform { } if (id.includes("grok")) return {} + // LiteLLM proxied models: infer reasoning variant from the underlying model + // to avoid false-positive reasoning param injection for aliased models + if (model.providerID === "litellm" && model.capabilities.reasoning) { + const apiId = model.api.id.toLowerCase() + if (apiId.includes("claude") || apiId.includes("anthropic")) { + return { + high: { + thinking: { + type: "enabled", + budgetTokens: Math.min(16_000, Math.floor(model.limit.output / 2 - 1)), + }, + }, + max: { + thinking: { + type: "enabled", + budgetTokens: Math.min(31_999, model.limit.output - 1), + }, + }, + } + } + return Object.fromEntries(WIDELY_SUPPORTED_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }])) + } + switch (model.api.npm) { case "@openrouter/ai-sdk-provider": if (!model.id.includes("gpt") && !model.id.includes("gemini-3") && !model.id.includes("claude")) return {} From ce92937bf6ff73daa4ac5f5c363c84e8623d06eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1vid=20Balatoni?= Date: Fri, 20 Feb 2026 17:30:43 +0100 Subject: [PATCH 04/18] fix(opencode): add explicit litellm providerID check for proxy detection --- packages/opencode/src/session/llm.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts index 4e42fb0d2ec7..60c06e615c93 100644 --- a/packages/opencode/src/session/llm.ts +++ b/packages/opencode/src/session/llm.ts @@ -156,6 +156,7 @@ export namespace LLM { // 1. Providers with "litellm" in their ID or API ID (auto-detected) // 2. 
Providers with explicit "litellmProxy: true" option (opt-in for custom gateways)
     const isLiteLLMProxy =
+      input.model.providerID === "litellm" ||
       provider.options?.["litellmProxy"] === true ||
       input.model.providerID.toLowerCase().includes("litellm") ||
       input.model.api.id.toLowerCase().includes("litellm")

From 99c312f5c2d484aa2ec4b041e62341345bfea2aa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C3=A1vid=20Balatoni?=
Date: Fri, 20 Feb 2026 18:00:18 +0100
Subject: [PATCH 05/18] feat(opencode): allow interactive litellm provider
 configuration

---
 packages/opencode/src/provider/provider.ts     | 18 +++++++++++-------
 .../opencode/src/server/routes/provider.ts     | 13 ++++++++++++-
 2 files changed, 23 insertions(+), 8 deletions(-)

diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts
index b8d5b5e334a8..8bbf1cf335a9 100644
--- a/packages/opencode/src/provider/provider.ts
+++ b/packages/opencode/src/provider/provider.ts
@@ -593,6 +593,14 @@ export namespace Provider {
       const config = await Config.get()
       const providerConfig = config.provider?.["litellm"]
 
+      const hasEnv = !!(Env.get("LITELLM_API_KEY") || Env.get("LITELLM_HOST") || Env.get("LITELLM_BASE_URL"))
+      const hasConfig = !!providerConfig
+      const auth = await Auth.get("litellm")
+      const hasAuth = auth?.type === "api"
+
+      // Skip discovery when there is no configuration at all
+      if (!hasEnv && !hasConfig && !hasAuth) return { autoload: false }
+
       const baseURL =
         providerConfig?.options?.baseURL ??
         Env.get("LITELLM_HOST") ??
@@ -603,8 +611,7 @@ export namespace Provider {
         if (providerConfig?.options?.apiKey) return providerConfig.options.apiKey
         const envKey = Env.get("LITELLM_API_KEY")
         if (envKey) return envKey
-        const auth = await Auth.get("litellm")
-        if (auth?.type === "api") return auth.key
+        if (hasAuth) return auth.key
         return undefined
       })()
 
@@ -856,11 +863,8 @@ export namespace Provider {
       }
     }
 
-    // Seed LiteLLM provider when env vars exist but no entry in database
-    if (
-      !database["litellm"] &&
-      (Env.get("LITELLM_API_KEY") || Env.get("LITELLM_HOST") || Env.get("LITELLM_BASE_URL"))
-    ) {
+    // Seed LiteLLM provider so it is always available for interactive configuration
+    if (!database["litellm"]) {
       database["litellm"] = {
         id: "litellm",
         name: "LiteLLM",
diff --git a/packages/opencode/src/server/routes/provider.ts b/packages/opencode/src/server/routes/provider.ts
index 872b48be79dc..3f53f8d00ae0 100644
--- a/packages/opencode/src/server/routes/provider.ts
+++ b/packages/opencode/src/server/routes/provider.ts
@@ -40,6 +40,17 @@ export const ProviderRoutes = lazy(() =>
       const enabled = config.enabled_providers ? new Set(config.enabled_providers) : undefined
 
       const allProviders = await ModelsDev.get()
+
+      // Include LiteLLM so it is available for interactive configuration
+      if (!allProviders["litellm"]) {
+        allProviders["litellm"] = {
+          id: "litellm",
+          name: "LiteLLM",
+          env: ["LITELLM_API_KEY"],
+          models: {},
+        } as (typeof allProviders)[string]
+      }
+
       const filteredProviders: Record<string, (typeof allProviders)[string]> = {}
       for (const [key, value] of Object.entries(allProviders)) {
         if ((enabled ? enabled.has(key) : true) && !disabled.has(key)) {
           filteredProviders[key] = value
         }
       }
@@ -54,7 +65,7 @@ export const ProviderRoutes = lazy(() =>
       )
       return c.json({
         all: Object.values(providers),
-        default: mapValues(providers, (item) => Provider.sort(Object.values(item.models))[0].id),
+        default: mapValues(providers, (item) => Provider.sort(Object.values(item.models))[0]?.id),
         connected: Object.keys(connected),
       })
     },
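Reviewer note (PATCHES 02+05): the loader resolves its settings from opencode config,
then environment, then stored auth. For an env-only smoke test, these are the
variables the patches actually read (values illustrative):

    process.env["LITELLM_HOST"] = "http://localhost:4000"       // or LITELLM_BASE_URL
    process.env["LITELLM_API_KEY"] = "sk-litellm-placeholder"   // optional
    process.env["LITELLM_CUSTOM_HEADERS"] = '{"x-team":"ai"}'   // JSON object; invalid JSON is ignored
    process.env["LITELLM_TIMEOUT"] = "5000"                     // discovery timeout in ms
    // Any of LITELLM_HOST, LITELLM_BASE_URL, or LITELLM_API_KEY triggers discovery;
    // CUSTOM_HEADERS and TIMEOUT only tune it. With none of the three set (and no
    // config or auth entry), the loader returns { autoload: false } without touching
    // the network.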
From d859774dc79a42ada81994073c690b022b1a27 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C3=A1vid=20Balatoni?=
Date: Fri, 20 Feb 2026 21:11:12 +0100
Subject: [PATCH 06/18] feat(opencode): add litellm multi-step connect flow in
 TUI

---
 .../src/cli/cmd/tui/component/dialog-provider.tsx | 50 +++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/packages/opencode/src/cli/cmd/tui/component/dialog-provider.tsx b/packages/opencode/src/cli/cmd/tui/component/dialog-provider.tsx
index 9682bee4ead2..ade9f7f39c5f 100644
--- a/packages/opencode/src/cli/cmd/tui/component/dialog-provider.tsx
+++ b/packages/opencode/src/cli/cmd/tui/component/dialog-provider.tsx
@@ -83,6 +83,9 @@ export function createDialogProviderOptions() {
         }
       }
       if (method.type === "api") {
+        if (provider.id === "litellm") {
+          return dialog.replace(() => <LiteLLMMethod />)
+        }
         return dialog.replace(() => )
       }
     },
@@ -241,3 +244,50 @@ function ApiMethod(props: ApiMethodProps) {
     />
   )
 }
+
+function LiteLLMMethod() {
+  const dialog = useDialog()
+  const sdk = useSDK()
+  const sync = useSync()
+  const { theme } = useTheme()
+
+  return (
+    (
+        Enter the base URL of your LiteLLM proxy server.
+      )}
+      onConfirm={async (baseURL) => {
+        const url = baseURL?.trim() || "http://localhost:4000"
+        dialog.replace(() => (
+          (
+              Enter the API key for your LiteLLM proxy, or leave empty if not required.
+            )}
+            onConfirm={async (apiKey) => {
+              await sdk.client.config.update({
+                provider: {
+                  litellm: {
+                    options: { baseURL: url },
+                  },
+                },
+              })
+              if (apiKey?.trim()) {
+                await sdk.client.auth.set({
+                  providerID: "litellm",
+                  auth: { type: "api", key: apiKey.trim() },
+                })
+              }
+              await sdk.client.instance.dispose()
+              await sync.bootstrap()
+              dialog.replace(() => )
+            }}
+          />
+        ))
+      }}
+    />
+  )
+}
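Reviewer note (PATCH 06): condensed, the two-step TUI flow above ends in these calls,
in this order (a sketch of the onConfirm handlers, not new behavior):

    await sdk.client.config.update({
      provider: { litellm: { options: { baseURL: url } } },
    })
    if (apiKey?.trim()) {
      await sdk.client.auth.set({
        providerID: "litellm",
        auth: { type: "api", key: apiKey.trim() },
      })
    }
    await sdk.client.instance.dispose() // drop the cached server instance...
    await sync.bootstrap()              // ...so the provider list re-runs discovery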
From a55192846599356b1300b7e984e2fe3f42062209 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C3=A1vid=20Balatoni?=
Date: Fri, 20 Feb 2026 21:11:19 +0100
Subject: [PATCH 07/18] feat(app): add litellm connect dialog with base URL
 and API key fields

---
 packages/app/src/components/dialog-connect-provider.tsx | 71 ++++++++++++++++-
 1 file changed, 70 insertions(+), 1 deletion(-)

diff --git a/packages/app/src/components/dialog-connect-provider.tsx b/packages/app/src/components/dialog-connect-provider.tsx
index 90f4f41f7c6f..505da322b122 100644
--- a/packages/app/src/components/dialog-connect-provider.tsx
+++ b/packages/app/src/components/dialog-connect-provider.tsx
@@ -10,7 +10,7 @@ import { ProviderIcon } from "@opencode-ai/ui/provider-icon"
 import { Spinner } from "@opencode-ai/ui/spinner"
 import { TextField } from "@opencode-ai/ui/text-field"
 import { showToast } from "@opencode-ai/ui/toast"
-import { createMemo, Match, onCleanup, onMount, Switch } from "solid-js"
+import { createMemo, Match, onCleanup, onMount, Show, Switch } from "solid-js"
 import { createStore, produce } from "solid-js/store"
 import { Link } from "@/components/link"
 import { useLanguage } from "@/context/language"
@@ -309,6 +309,72 @@ export function DialogConnectProvider(props: { provider: string }) {
     )
   }
 
+  function LiteLLMAuthView() {
+    const [formStore, setFormStore] = createStore({
+      baseURL: "",
+      apiKey: "",
+      error: undefined as string | undefined,
+    })
+
+    async function handleSubmit(e: SubmitEvent) {
+      e.preventDefault()
+
+      const form = e.currentTarget as HTMLFormElement
+      const formData = new FormData(form)
+      const baseURL = (formData.get("baseURL") as string)?.trim() || "http://localhost:4000"
+      const apiKey = (formData.get("apiKey") as string)?.trim()
+
+      setFormStore("error", undefined)
+      await globalSDK.client.config.update({
+        provider: {
+          litellm: {
+            options: { baseURL },
+          },
+        },
+      })
+      if (apiKey) {
+        await globalSDK.client.auth.set({
+          providerID: props.provider,
+          auth: { type: "api", key: apiKey },
+        })
+      }
+      await complete()
+    }
+
+    return (
+      <form onSubmit={handleSubmit}>
+        <div>
+          Connect to a LiteLLM proxy server. Models will be discovered automatically.
+        </div>
+        <div>
+          <TextField
+            label="Base URL"
+            placeholder="http://localhost:4000"
+            name="baseURL"
+            value={formStore.baseURL}
+            onChange={(v) => setFormStore("baseURL", v)}
+          />
+          <TextField
+            label="API Key (optional)"
+            name="apiKey"
+            value={formStore.apiKey}
+            onChange={(v) => setFormStore("apiKey", v)}
+          />
+          <Show when={formStore.error}>
+            <div>{formStore.error}</div>
+          </Show>
+        </div>
+        <div>
+          <Button type="submit">Connect</Button>
+        </div>
+      </form>
+    )
+  }
+
   function OAuthCodeView() {
     const [formStore, setFormStore] = createStore({
       value: "",
@@ -479,6 +545,9 @@ export function DialogConnectProvider(props: { provider: string }) {
         </Match>
+        <Match when={props.provider === "litellm"}>
+          <LiteLLMAuthView />
+        </Match>
       </Switch>

From f853c63a9998e56a7280f3835f1a1e12bf507525 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C3=A1vid=20Balatoni?=
Date: Fri, 20 Feb 2026 21:13:36 +0100
Subject: [PATCH 08/18] refactor: clean up litellm code to follow style guide

---
 packages/app/src/components/dialog-connect-provider.tsx | 20 +++++++------------
 packages/opencode/src/provider/provider.ts               | 14 ++++++++-----
 2 files changed, 16 insertions(+), 18 deletions(-)

diff --git a/packages/app/src/components/dialog-connect-provider.tsx b/packages/app/src/components/dialog-connect-provider.tsx
index 505da322b122..ce404bd2a359 100644
--- a/packages/app/src/components/dialog-connect-provider.tsx
+++ b/packages/app/src/components/dialog-connect-provider.tsx
@@ -313,29 +313,26 @@ export function DialogConnectProvider(props: { provider: string }) {
     const [formStore, setFormStore] = createStore({
       baseURL: "",
       apiKey: "",
-      error: undefined as string | undefined,
     })
 
     async function handleSubmit(e: SubmitEvent) {
       e.preventDefault()
 
-      const form = e.currentTarget as HTMLFormElement
-      const formData = new FormData(form)
-      const baseURL = (formData.get("baseURL") as string)?.trim() || "http://localhost:4000"
-      const apiKey = (formData.get("apiKey") as string)?.trim()
+      const data = new FormData(e.currentTarget as HTMLFormElement)
+      const url = (data.get("baseURL") as string)?.trim() || "http://localhost:4000"
+      const key = (data.get("apiKey") as string)?.trim()
 
-      setFormStore("error", undefined)
       await globalSDK.client.config.update({
         provider: {
           litellm: {
-            options: { baseURL },
+            options: { baseURL: url },
           },
         },
       })
-      if (apiKey) {
+      if (key) {
         await globalSDK.client.auth.set({
           providerID: props.provider,
-          auth: { type: "api", key: apiKey },
+          auth: { type: "api", key },
         })
       }
       await complete()
@@ -352,9 +349,6 @@ export function DialogConnectProvider(props: { provider: string }) {
             value={formStore.apiKey}
             onChange={(v) => setFormStore("apiKey", v)}
           />
-          <Show when={formStore.error}>
-            <div>{formStore.error}</div>
-          </Show>
         </div>
         <div>
           <Button type="submit">Connect</Button>
diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts
index 8bbf1cf335a9..ca5407d6de34 100644
--- a/packages/opencode/src/provider/provider.ts
+++ b/packages/opencode/src/provider/provider.ts
@@ -593,13 +593,17 @@ export namespace Provider {
       const config = await Config.get()
       const providerConfig = config.provider?.["litellm"]
 
-      const hasEnv = !!(Env.get("LITELLM_API_KEY") || Env.get("LITELLM_HOST") || Env.get("LITELLM_BASE_URL"))
-      const hasConfig = !!providerConfig
       const auth = await Auth.get("litellm")
-      const hasAuth = auth?.type === "api"
 
       // Skip discovery when there is no configuration at all
-      if (!hasEnv && !hasConfig && !hasAuth) return { autoload: false }
+      if (
+        !providerConfig &&
+        !Env.get("LITELLM_API_KEY") &&
+        !Env.get("LITELLM_HOST") &&
+        !Env.get("LITELLM_BASE_URL") &&
+        auth?.type !== "api"
+      )
+        return { autoload: false }
 
       const baseURL =
         providerConfig?.options?.baseURL ??
@@ -611,7 +615,7 @@ export namespace Provider {
         if (providerConfig?.options?.apiKey) return providerConfig.options.apiKey
         const envKey = Env.get("LITELLM_API_KEY")
         if (envKey) return envKey
-        if (hasAuth) return auth.key
+        if (auth?.type === "api") return auth.key
         return undefined
       })()

From 09ea704fc238023fc81e3c79ff3ba81b449a29c9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C3=A1vid=20Balatoni?=
Date: Fri, 20 Feb 2026 22:26:43 +0100
Subject: [PATCH 09/18] fix(opencode): use global config update for litellm
 base URL persistence

---
 .../app/src/components/dialog-connect-provider.tsx | 10 ++++++----
 .../src/cli/cmd/tui/component/dialog-provider.tsx  | 10 ++++++----
 2 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/packages/app/src/components/dialog-connect-provider.tsx b/packages/app/src/components/dialog-connect-provider.tsx
index ce404bd2a359..c7f9dc8582d8 100644
--- a/packages/app/src/components/dialog-connect-provider.tsx
+++ b/packages/app/src/components/dialog-connect-provider.tsx
@@ -322,10 +322,12 @@ export function DialogConnectProvider(props: { provider: string }) {
       const url = (data.get("baseURL") as string)?.trim() || "http://localhost:4000"
       const key = (data.get("apiKey") as string)?.trim()
 
-      await globalSDK.client.config.update({
-        provider: {
-          litellm: {
-            options: { baseURL: url },
+      await globalSDK.client.global.config.update({
+        config: {
+          provider: {
+            litellm: {
+              options: { baseURL: url },
+            },
           },
         },
       })
diff --git a/packages/opencode/src/cli/cmd/tui/component/dialog-provider.tsx b/packages/opencode/src/cli/cmd/tui/component/dialog-provider.tsx
index ade9f7f39c5f..3a9bb38f473f 100644
--- a/packages/opencode/src/cli/cmd/tui/component/dialog-provider.tsx
+++ b/packages/opencode/src/cli/cmd/tui/component/dialog-provider.tsx
@@ -268,10 +268,12 @@ function LiteLLMMethod() {
               Enter the API key for your LiteLLM proxy, or leave empty if not required. 
)} onConfirm={async (apiKey) => { - await sdk.client.config.update({ - provider: { - litellm: { - options: { baseURL: url }, + await sdk.client.global.config.update({ + config: { + provider: { + litellm: { + options: { baseURL: url }, + }, }, }, }) From 7394dc65ebbeea0269551bf20760a1460369c367 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1vid=20Balatoni?= Date: Fri, 20 Feb 2026 22:52:48 +0100 Subject: [PATCH 10/18] feat(opencode): store underlying model in litellm model options --- packages/opencode/src/provider/litellm.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/opencode/src/provider/litellm.ts b/packages/opencode/src/provider/litellm.ts index 958da8f82a7f..590c5f65a4ef 100644 --- a/packages/opencode/src/provider/litellm.ts +++ b/packages/opencode/src/provider/litellm.ts @@ -85,7 +85,7 @@ export namespace LiteLLM { }, status: "active", headers: {}, - options: {}, + options: underlyingModel ? { underlyingModel } : {}, cost: { input: inputCost, output: outputCost, From 1049e5e2391da1e077ce7cb9cc84f5f534c37060 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1vid=20Balatoni?= Date: Fri, 20 Feb 2026 22:52:54 +0100 Subject: [PATCH 11/18] fix(opencode): detect litellm Claude models via underlying model info --- packages/opencode/src/provider/transform.ts | 23 ++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts index 58cbd91930dc..82b6f7cb8abb 100644 --- a/packages/opencode/src/provider/transform.ts +++ b/packages/opencode/src/provider/transform.ts @@ -71,7 +71,11 @@ export namespace ProviderTransform { .filter((msg): msg is ModelMessage => msg !== undefined && msg.content !== "") } - if (model.api.id.includes("claude")) { + const isClaudeToolCall = + model.api.id.includes("claude") || + (model.providerID === "litellm" && + ((model.options?.underlyingModel as string) ?? "").toLowerCase().includes("claude")) + if (isClaudeToolCall) { return msgs.map((msg) => { if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) { msg.content = msg.content.map((part) => { @@ -364,10 +368,23 @@ export namespace ProviderTransform { if (id.includes("grok")) return {} // LiteLLM proxied models: infer reasoning variant from the underlying model - // to avoid false-positive reasoning param injection for aliased models + // to avoid false-positive reasoning param injection for aliased models. + // Model aliases (e.g., "sonnet-4.5") may not contain "claude" or "anthropic", + // so we also check the underlying model stored in options.underlyingModel + // (e.g., "azure_ai/claude-sonnet-4-5"). if (model.providerID === "litellm" && model.capabilities.reasoning) { const apiId = model.api.id.toLowerCase() - if (apiId.includes("claude") || apiId.includes("anthropic")) { + const underlying = ((model.options?.underlyingModel as string) ?? 
"").toLowerCase() + const isClaude = [apiId, underlying].some((s) => s.includes("claude") || s.includes("anthropic")) + if (isClaude) { + const isAdaptive = ["opus-4-6", "opus-4.6", "sonnet-4-6", "sonnet-4.6"].some( + (v) => apiId.includes(v) || underlying.includes(v), + ) + if (isAdaptive) { + return Object.fromEntries( + ["low", "medium", "high", "max"].map((effort) => [effort, { thinking: { type: "adaptive" }, effort }]), + ) + } return { high: { thinking: { From 9bbd29a57dca66c128c25a921ef73cf2c03e2d14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1vid=20Balatoni?= Date: Fri, 20 Feb 2026 23:38:56 +0100 Subject: [PATCH 12/18] fix(opencode): use snake_case budget_tokens for litellm thinking variants --- packages/opencode/src/provider/transform.ts | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts index 82b6f7cb8abb..23cdba654618 100644 --- a/packages/opencode/src/provider/transform.ts +++ b/packages/opencode/src/provider/transform.ts @@ -372,6 +372,9 @@ export namespace ProviderTransform { // Model aliases (e.g., "sonnet-4.5") may not contain "claude" or "anthropic", // so we also check the underlying model stored in options.underlyingModel // (e.g., "azure_ai/claude-sonnet-4-5"). + // NOTE: @ai-sdk/openai-compatible passes provider options as raw request body + // fields, so we must use snake_case (budget_tokens) not camelCase (budgetTokens) + // since LiteLLM forwards them directly to the upstream API. if (model.providerID === "litellm" && model.capabilities.reasoning) { const apiId = model.api.id.toLowerCase() const underlying = ((model.options?.underlyingModel as string) ?? "").toLowerCase() @@ -382,20 +385,23 @@ export namespace ProviderTransform { ) if (isAdaptive) { return Object.fromEntries( - ["low", "medium", "high", "max"].map((effort) => [effort, { thinking: { type: "adaptive" }, effort }]), + ["low", "medium", "high", "max"].map((effort) => [ + effort, + { thinking: { type: "enabled", budget_tokens: Math.min(16_000, Math.floor(model.limit.output / 2 - 1)) } }, + ]), ) } return { high: { thinking: { type: "enabled", - budgetTokens: Math.min(16_000, Math.floor(model.limit.output / 2 - 1)), + budget_tokens: Math.min(16_000, Math.floor(model.limit.output / 2 - 1)), }, }, max: { thinking: { type: "enabled", - budgetTokens: Math.min(31_999, model.limit.output - 1), + budget_tokens: Math.min(31_999, model.limit.output - 1), }, }, } From 7ea81abf2316f91df08815f15c13ec51301d5152 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1vid=20Balatoni?= Date: Sat, 21 Feb 2026 00:56:45 +0100 Subject: [PATCH 13/18] fix(opencode): select correct system prompt for litellm Claude models --- packages/opencode/src/session/system.ts | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/packages/opencode/src/session/system.ts b/packages/opencode/src/session/system.ts index a61dd8cba551..b7b999f9aee9 100644 --- a/packages/opencode/src/session/system.ts +++ b/packages/opencode/src/session/system.ts @@ -17,12 +17,16 @@ export namespace SystemPrompt { } export function provider(model: Provider.Model) { - if (model.api.id.includes("gpt-5")) return [PROMPT_CODEX] - if (model.api.id.includes("gpt-") || model.api.id.includes("o1") || model.api.id.includes("o3")) - return [PROMPT_BEAST] - if (model.api.id.includes("gemini-")) return [PROMPT_GEMINI] - if (model.api.id.includes("claude")) return [PROMPT_ANTHROPIC] - if (model.api.id.toLowerCase().includes("trinity")) 
return [PROMPT_TRINITY]
+    const apiId = model.api.id
+    const underlying = ((model.options?.underlyingModel as string) ?? "").toLowerCase()
+    const isLiteLLMClaude =
+      model.providerID === "litellm" && (underlying.includes("claude") || underlying.includes("anthropic"))
+
+    if (apiId.includes("gpt-5")) return [PROMPT_CODEX]
+    if (apiId.includes("gpt-") || apiId.includes("o1") || apiId.includes("o3")) return [PROMPT_BEAST]
+    if (apiId.includes("gemini-")) return [PROMPT_GEMINI]
+    if (apiId.includes("claude") || isLiteLLMClaude) return [PROMPT_ANTHROPIC]
+    if (apiId.toLowerCase().includes("trinity")) return [PROMPT_TRINITY]
     return [PROMPT_ANTHROPIC_WITHOUT_TODO]
   }

From f2dee7c43c62b7fcba9742ea1e9bb7fcbf6ca61f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C3=A1vid=20Balatoni?=
Date: Sat, 21 Feb 2026 00:56:50 +0100
Subject: [PATCH 14/18] fix(opencode): filter underlyingModel from litellm
 provider options

---
 packages/opencode/src/session/llm.ts | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts
index 60c06e615c93..c0810df4fdae 100644
--- a/packages/opencode/src/session/llm.ts
+++ b/packages/opencode/src/session/llm.ts
@@ -200,7 +200,7 @@ export namespace LLM {
       temperature: params.temperature,
       topP: params.topP,
       topK: params.topK,
-      providerOptions: ProviderTransform.providerOptions(input.model, params.options),
+      providerOptions: ProviderTransform.providerOptions(input.model, filterInternalOptions(params.options)),
       activeTools: Object.keys(tools).filter((x) => x !== "invalid"),
       tools,
       toolChoice: input.toolChoice,
@@ -266,6 +266,14 @@ export namespace LLM {
     return input.tools
   }
 
+  // Filter internal metadata keys from options before passing to providerOptions.
+  // These keys are used for internal logic (e.g., variant detection) but should not
+  // be sent as request body fields to the provider API.
+  function filterInternalOptions(options: Record<string, any>): Record<string, any> {
+    const { underlyingModel, ...rest } = options
+    return rest
+  }
+
   // Check if messages contain any tool-call content
   // Used to determine if a dummy tool should be added for LiteLLM proxy compatibility
   export function hasToolCalls(messages: ModelMessage[]): boolean {

From 351dcd060d70288688aac6b45498617027edeac8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C3=A1vid=20Balatoni?=
Date: Sat, 21 Feb 2026 00:56:54 +0100
Subject: [PATCH 15/18] feat(opencode): enable prompt caching for litellm
 Claude models

---
 packages/opencode/src/provider/transform.ts | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts
index 23cdba654618..aa34e8e3bd1e 100644
--- a/packages/opencode/src/provider/transform.ts
+++ b/packages/opencode/src/provider/transform.ts
@@ -256,13 +256,17 @@ export namespace ProviderTransform {
   export function message(msgs: ModelMessage[], model: Provider.Model, options: Record<string, any>) {
     msgs = unsupportedParts(msgs, model)
     msgs = normalizeMessages(msgs, model, options)
+    const underlying = ((model.options?.underlyingModel as string) ?? 
"").toLowerCase() + const isLiteLLMClaude = + model.providerID === "litellm" && (underlying.includes("claude") || underlying.includes("anthropic")) if ( (model.providerID === "anthropic" || model.api.id.includes("anthropic") || model.api.id.includes("claude") || model.id.includes("anthropic") || model.id.includes("claude") || - model.api.npm === "@ai-sdk/anthropic") && + model.api.npm === "@ai-sdk/anthropic" || + isLiteLLMClaude) && model.api.npm !== "@ai-sdk/gateway" ) { msgs = applyCaching(msgs, model) From 6783600f936f80ba5fbc328e37b2ecbf0917b9a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1vid=20Balatoni?= Date: Sat, 21 Feb 2026 20:57:08 +0100 Subject: [PATCH 16/18] fix(opencode): add 10s timeout to bun info registry check --- packages/opencode/src/bun/registry.ts | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/packages/opencode/src/bun/registry.ts b/packages/opencode/src/bun/registry.ts index c567668acd71..aa4118a31cc9 100644 --- a/packages/opencode/src/bun/registry.ts +++ b/packages/opencode/src/bun/registry.ts @@ -19,7 +19,17 @@ export namespace PackageRegistry { }, }) - const code = await result.exited + const code = await Promise.race([ + result.exited, + new Promise((resolve) => setTimeout(() => resolve(null), 10_000)), + ]) + + if (code === null) { + result.kill() + log.warn("bun info timed out", { pkg, field }) + return null + } + const stdout = result.stdout ? await readableStreamToText(result.stdout) : "" const stderr = result.stderr ? await readableStreamToText(result.stderr) : "" From d3e6960a96487040784d4a8fbdb4537f5316bff4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1vid=20Balatoni?= Date: Sat, 21 Feb 2026 22:38:45 +0100 Subject: [PATCH 17/18] refactor(opencode): replace try/catch with .catch() in litellm code --- packages/opencode/src/provider/litellm.ts | 43 ++++++++++------------ packages/opencode/src/provider/provider.ts | 12 ++---- 2 files changed, 22 insertions(+), 33 deletions(-) diff --git a/packages/opencode/src/provider/litellm.ts b/packages/opencode/src/provider/litellm.ts index 590c5f65a4ef..5f54071b5003 100644 --- a/packages/opencode/src/provider/litellm.ts +++ b/packages/opencode/src/provider/litellm.ts @@ -222,30 +222,25 @@ export namespace LiteLLM { headers["Authorization"] = `Bearer ${options.apiKey}` } - try { - // Try /model/info first for rich metadata, fall back to /models - const rich = await fetchModelInfo(base, headers, timeout) - if (rich) { - log.info("discovered models from LiteLLM /model/info", { - count: Object.keys(rich).length, - host, - }) - return rich - } - - const basic = await fetchModelList(base, headers, timeout) - if (Object.keys(basic).length > 0) { - log.info("discovered models from /models (fallback)", { - count: Object.keys(basic).length, - host, - }) - return basic - } - - return undefined - } catch (e) { - log.warn("LiteLLM model discovery error", { error: e, host }) - return undefined + // Try /model/info first for rich metadata, fall back to /models + const rich = await fetchModelInfo(base, headers, timeout) + if (rich) { + log.info("discovered models from LiteLLM /model/info", { + count: Object.keys(rich).length, + host, + }) + return rich } + + const basic = await fetchModelList(base, headers, timeout) + if (Object.keys(basic).length > 0) { + log.info("discovered models from /models (fallback)", { + count: Object.keys(basic).length, + host, + }) + return basic + } + + return undefined } } diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts 
index ca5407d6de34..fb1b919ee333 100644
--- a/packages/opencode/src/provider/provider.ts
+++ b/packages/opencode/src/provider/provider.ts
@@ -619,15 +619,9 @@ export namespace Provider {
         return undefined
       })()
 
-      const customHeaders = iife(() => {
-        const raw = Env.get("LITELLM_CUSTOM_HEADERS")
-        if (!raw) return {}
-        try {
-          return JSON.parse(raw) as Record<string, string>
-        } catch {
-          return {}
-        }
-      })
+      const customHeaders = await Promise.resolve(Env.get("LITELLM_CUSTOM_HEADERS"))
+        .then((raw) => (raw ? (JSON.parse(raw) as Record<string, string>) : {}))
+        .catch(() => ({}))
 
       const timeout = Number(Env.get("LITELLM_TIMEOUT") ?? "5000")

From ee81b6b5dae08b74323623af7eb3b4a50925e80c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C3=A1vid=20Balatoni?=
Date: Sun, 22 Feb 2026 16:35:09 +0100
Subject: [PATCH 18/18] fix(opencode): clean up streams after killing
 timed-out bun info

---
 packages/opencode/src/bun/registry.ts | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/packages/opencode/src/bun/registry.ts b/packages/opencode/src/bun/registry.ts
index aa4118a31cc9..e7cbcd2729c2 100644
--- a/packages/opencode/src/bun/registry.ts
+++ b/packages/opencode/src/bun/registry.ts
@@ -26,6 +26,9 @@ export namespace PackageRegistry {
 
     if (code === null) {
       result.kill()
+      result.stdout?.cancel().catch(() => {})
+      result.stderr?.cancel().catch(() => {})
+      result.unref()
       log.warn("bun info timed out", { pkg, field })
       return null
     }
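Reviewer note (PATCHES 10-15): the aliased-model case these patches target, end to
end. A hypothetical /model/info entry — the values are illustrative:

    // A LiteLLM alias whose public name never mentions Claude...
    const entry = {
      model_name: "sonnet-4.5",
      litellm_params: { model: "azure_ai/claude-sonnet-4-5" },
      model_info: { supports_reasoning: true, max_output_tokens: 64_000 },
    }
    // ...so toModel() stores litellm_params.model as options.underlyingModel
    // (patch 10), and the downstream checks key off that field instead of the ID:
    //   - transform.ts picks the Claude reasoning variants and emits snake_case
    //     budget_tokens, e.g. { thinking: { type: "enabled", budget_tokens: 31_999 } }
    //     for the "max" effort, since min(31_999, 64_000 - 1) = 31_999 (patches 11-12)
    //   - system.ts selects the Anthropic system prompt (patch 13)
    //   - llm.ts strips underlyingModel before it reaches the request body (patch 14)
    //   - message() applies Anthropic prompt-caching markers (patch 15)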