Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions packages/opencode/src/config/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -984,6 +984,13 @@ export namespace Config {
}),
)
.optional(),
shouldFetchModels: z
.boolean()
.default(true)
.optional()
.describe(
"Dynamically fetch available models from the provider's OpenAI-compatible /models endpoint at startup. Defaults to true. Fetched models are merged with manually configured ones (manual config takes precedence). Set to false to disable.",
),
options: z
.object({
apiKey: z.string().optional(),
Expand Down
158 changes: 158 additions & 0 deletions packages/opencode/src/provider/provider.ts
Original file line number Diff line number Diff line change
Expand Up @@ -754,6 +754,129 @@ export namespace Provider {
}
}

// Fallback token limits used when a provider does not report its own.
const DEFAULT_CONTEXT = 128000
const DEFAULT_OUTPUT = 32000

/**
 * Builds a minimal "active" Model record for a dynamically discovered model.
 * Costs start at zero, limits at the defaults above, and capabilities are
 * conservative (text-only I/O, tool calls on, no reasoning/attachments);
 * callers overwrite individual fields when richer metadata is available.
 */
function emptyModel(providerID: string, id: string, npm: string, baseURL: string): Model {
  // text-only media flags, shared by the input and output capability maps
  const textOnly = { text: true, audio: false, image: false, video: false, pdf: false }
  const model: Model = {
    id,
    providerID,
    name: id,
    api: { id, npm, url: baseURL },
    status: "active",
    family: "",
    release_date: "",
    headers: {},
    options: {},
    cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
    limit: { context: DEFAULT_CONTEXT, output: DEFAULT_OUTPUT },
    capabilities: {
      temperature: true,
      reasoning: false,
      attachment: false,
      toolcall: true,
      input: { ...textOnly },
      output: { ...textOnly },
      interleaved: false,
    },
    variants: {},
  }
  return model
}

/**
 * Queries LiteLLM's /model/info endpoint, which reports token limits,
 * per-token costs, and capability flags alongside each model name.
 * Returns undefined when the endpoint is unreachable, times out, answers
 * non-2xx, sends unparseable JSON, or lists no models — the caller then
 * falls back to the plain /models listing.
 */
async function fetchModelInfo(providerID: string, baseURL: string, npm: string, headers: Record<string, string>) {
  const response = await fetch(`${baseURL}/model/info`, { headers, signal: AbortSignal.timeout(10_000) }).catch(
    () => undefined,
  )
  if (!response?.ok) return undefined

  // /model/info is LiteLLM-specific; a proxy may answer 200 with a non-JSON
  // body (e.g. an HTML error page). Response.json() rejects on invalid JSON,
  // so guard the parse instead of letting the rejection kill the caller's
  // whole background fetch — a parse failure just means "not available here".
  const body = (await response.json().catch(() => undefined)) as
    | {
        data?: Array<{
          model_name: string
          model_info?: {
            max_input_tokens?: number | null
            max_output_tokens?: number | null
            max_tokens?: number | null
            input_cost_per_token?: number | null
            output_cost_per_token?: number | null
            supports_vision?: boolean | null
            supports_function_calling?: boolean | null
            supports_reasoning?: boolean | null
            supports_pdf_input?: boolean | null
          }
        }>
      }
    | undefined
  if (!body) {
    log.warn("fetchModels: invalid JSON from /model/info", { providerID })
    return undefined
  }
  const items = body.data ?? []
  if (items.length === 0) return undefined

  const models: Record<string, Model> = {}
  for (const item of items) {
    // skip nameless entries and duplicates (keep the first occurrence)
    if (!item.model_name || models[item.model_name]) continue
    const info = item.model_info ?? {}
    const vision = info.supports_vision === true
    const model = emptyModel(providerID, item.model_name, npm, baseURL)
    model.limit = {
      context: info.max_input_tokens ?? DEFAULT_CONTEXT,
      output: info.max_output_tokens ?? info.max_tokens ?? DEFAULT_OUTPUT,
    }
    model.cost = {
      input: info.input_cost_per_token ?? 0,
      output: info.output_cost_per_token ?? 0,
      cache: { read: 0, write: 0 },
    }
    model.capabilities = {
      ...model.capabilities,
      reasoning: info.supports_reasoning === true,
      // only an explicit `false` disables tool calls — missing metadata means yes
      toolcall: info.supports_function_calling !== false,
      attachment: vision,
      input: { ...model.capabilities.input, image: vision, pdf: info.supports_pdf_input === true },
    }
    models[item.model_name] = model
  }

  log.info("fetchModels: fetched from /model/info", { providerID, count: Object.keys(models).length })
  return models
}

/**
 * Queries the provider's OpenAI-compatible /models endpoint and returns a
 * bare-bones Model record (default limits, zero cost) per listed id.
 * Every failure — network error, timeout, non-2xx status, or unparseable
 * JSON — is logged and yields an empty map so the caller can proceed.
 */
async function fetchModelList(providerID: string, baseURL: string, npm: string, headers: Record<string, string>) {
  const response = await fetch(`${baseURL}/models`, { headers, signal: AbortSignal.timeout(10_000) }).catch(
    (e: unknown) => {
      log.warn("fetchModels: error fetching /models", { providerID, error: e })
      return undefined
    },
  )
  if (!response?.ok) {
    if (response) log.warn("fetchModels: failed to fetch /models", { providerID, status: response.status })
    return {}
  }

  // guard the parse: Response.json() rejects on invalid JSON, and a
  // misconfigured endpoint may answer 200 with a non-JSON body — treat
  // that like any other failure instead of propagating the rejection
  const body = (await response.json().catch(() => undefined)) as { data?: Array<{ id: string }> } | undefined
  if (!body) {
    log.warn("fetchModels: invalid JSON from /models", { providerID })
    return {}
  }
  const models: Record<string, Model> = {}
  for (const item of body.data ?? []) {
    if (!item.id) continue
    models[item.id] = emptyModel(providerID, item.id, npm, baseURL)
  }

  log.info("fetchModels: fetched from /models", { providerID, count: Object.keys(models).length })
  return models
}

/**
 * Discovers the models a provider exposes, given its connection options.
 * Prefers LiteLLM's metadata-rich /model/info endpoint and falls back to
 * the plain OpenAI-compatible /models listing; returns an empty map when
 * no baseURL is configured.
 */
async function fetchModels(providerID: string, options: Record<string, any>) {
  // strip trailing slashes so the endpoint paths join cleanly
  const baseURL = options["baseURL"]?.replace(/\/+$/, "")
  if (!baseURL) {
    log.warn("fetchModels: no baseURL for provider", { providerID })
    return {} as Record<string, Model>
  }

  const npm = options["npm"] ?? "@ai-sdk/openai-compatible"
  const headers: Record<string, string> = { Accept: "application/json" }
  const apiKey = options["apiKey"]
  if (apiKey) headers["Authorization"] = `Bearer ${apiKey}`

  // try LiteLLM /model/info first (has limits, costs, capabilities), fall back to /models
  const rich = await fetchModelInfo(providerID, baseURL, npm, headers)
  const hasRich = rich !== undefined && Object.keys(rich).length > 0
  return hasRich ? rich : fetchModelList(providerID, baseURL, npm, headers)
}

const state = Instance.state(async () => {
using _ = log.time("state")
const config = await Config.get()
Expand Down Expand Up @@ -1026,6 +1149,41 @@ export namespace Provider {
log.info("found", { providerID })
}

// fetch models dynamically in background for providers with shouldFetchModels enabled
// NOTE(review): absent (undefined) is treated the same as true here — only an
// explicit `false` in config opts a provider out of fetching.
const fetchTargets = configProviders.filter(([, p]) => p.shouldFetchModels !== false)
if (fetchTargets.length > 0) {
// fire-and-forget: the Promise.all is deliberately not awaited so provider
// startup is not blocked on network round-trips; fetched models are merged
// into the shared `providers` map whenever the requests complete
Promise.all(
fetchTargets.map(async ([providerID, provider]) => {
const info = database[providerID]
if (!info) return
// prefer options already resolved on the database entry over raw config
const options = info.options ?? provider.options ?? {}
// fall back to the npm package of the first known model when config omits it;
// NOTE(review): if info.models is empty this indexes with `undefined`, npm
// ends up undefined, and fetchModels substitutes its own default package
const npm = provider.npm ?? info.models[Object.keys(info.models)[0]]?.api.npm
log.info("fetchModels: fetching in background", { providerID })
const fetched = await fetchModels(providerID, { ...options, npm })
if (Object.keys(fetched).length === 0) return

const configProvider = config.provider?.[providerID]

// merge fetched models as base, existing manual models override
const existing = providers[providerID]
if (!existing) return
for (const [modelID, model] of Object.entries(fetched)) {
// manual config takes precedence: never overwrite an existing entry
if (existing.models[modelID]) continue
if (model.status === "deprecated") continue
// honor per-provider blacklist/whitelist filters from user config
if (configProvider?.blacklist?.includes(modelID)) continue
if (configProvider?.whitelist && !configProvider.whitelist.includes(modelID)) continue
model.variants = mapValues(ProviderTransform.variants(model), (v) => v)
existing.models[modelID] = model
}

log.info("fetchModels: background fetch complete", {
providerID,
// NOTE(review): this logs the fetched count, not the number actually merged
count: Object.keys(fetched).length,
})
}),
).catch((e) => log.warn("fetchModels: background fetch failed", { error: e }))
}

return {
models: languages,
providers,
Expand Down
Loading