diff --git a/bun.lock b/bun.lock
index c83946240bc..12cc9c91670 100644
--- a/bun.lock
+++ b/bun.lock
@@ -278,6 +278,7 @@
       "@clack/prompts": "1.0.0-alpha.1",
       "@hono/standard-validator": "0.1.5",
       "@hono/zod-validator": "catalog:",
+      "@llmgateway/ai-sdk-provider": "2.5.1",
       "@modelcontextprotocol/sdk": "1.25.2",
       "@octokit/graphql": "9.0.2",
       "@octokit/rest": "catalog:",
@@ -1094,6 +1095,8 @@
     "@leichtgewicht/ip-codec": ["@leichtgewicht/ip-codec@2.0.5", "", {}, "sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw=="],

+    "@llmgateway/ai-sdk-provider": ["@llmgateway/ai-sdk-provider@2.5.1", "", { "peerDependencies": { "ai": "^5.0.0-beta.12", "zod": "^3.24.1 || ^v4" } }, "sha512-UlmqsLr4Vsgd+mbEULFRyMStRS5DEd4SMUVU6Iu+n+uauKryJXilU/BXbAjHF7y8f595xHcYnpmPUocofN8xvQ=="],
+
     "@mdx-js/mdx": ["@mdx-js/mdx@3.1.1", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdx": "^2.0.0", "acorn": "^8.0.0", "collapse-white-space": "^2.0.0", "devlop": "^1.0.0", "estree-util-is-identifier-name": "^3.0.0", "estree-util-scope": "^1.0.0", "estree-walker": "^3.0.0", "hast-util-to-jsx-runtime": "^2.0.0", "markdown-extensions": "^2.0.0", "recma-build-jsx": "^1.0.0", "recma-jsx": "^1.0.0", "recma-stringify": "^1.0.0", "rehype-recma": "^1.0.0", "remark-mdx": "^3.0.0", "remark-parse": "^11.0.0", "remark-rehype": "^11.0.0", "source-map": "^0.7.0", "unified": "^11.0.0", "unist-util-position-from-estree": "^2.0.0", "unist-util-stringify-position": "^4.0.0", "unist-util-visit": "^5.0.0", "vfile": "^6.0.0" } }, "sha512-f6ZO2ifpwAQIpzGWaBQT2TXxPv6z3RBzQKpVftEWN78Vl/YweF1uwussDx8ECAXVtr3Rs89fKyG9YlzUs9DyGQ=="],

     "@mixmark-io/domino": ["@mixmark-io/domino@2.2.0", "", {}, "sha512-Y28PR25bHXUg88kCV7nivXrP2Nj2RueZ3/l/jdx6J9f8J4nsEGcgX0Qe6lt7Pa+J79+kPiJU3LguR6O/6zrLOw=="],

diff --git a/packages/app/src/hooks/use-providers.ts b/packages/app/src/hooks/use-providers.ts
index 4a73fa05588..9a210d87311 100644
--- a/packages/app/src/hooks/use-providers.ts
+++ b/packages/app/src/hooks/use-providers.ts
@@ -3,7 +3,7 @@
 import { base64Decode } from "@opencode-ai/util/encode"
 import { useParams } from "@solidjs/router"
 import { createMemo } from "solid-js"

-export const popularProviders = ["opencode", "anthropic", "github-copilot", "openai", "google", "openrouter", "vercel"]
+export const popularProviders = ["opencode", "anthropic", "github-copilot", "openai", "google", "openrouter", "vercel", "llmgateway"]

 export function useProviders() {
   const globalSync = useGlobalSync()
diff --git a/packages/opencode/package.json b/packages/opencode/package.json
index 2eaaa5f8526..27c7c7074fc 100644
--- a/packages/opencode/package.json
+++ b/packages/opencode/package.json
@@ -72,6 +72,7 @@
     "@clack/prompts": "1.0.0-alpha.1",
     "@hono/standard-validator": "0.1.5",
     "@hono/zod-validator": "catalog:",
+    "@llmgateway/ai-sdk-provider": "2.5.1",
     "@modelcontextprotocol/sdk": "1.25.2",
     "@octokit/graphql": "9.0.2",
     "@octokit/rest": "catalog:",
diff --git a/packages/opencode/src/cli/cmd/auth.ts b/packages/opencode/src/cli/cmd/auth.ts
index 3dd7bcc35dd..5e3897cadcd 100644
--- a/packages/opencode/src/cli/cmd/auth.ts
+++ b/packages/opencode/src/cli/cmd/auth.ts
@@ -276,6 +276,7 @@ export const AuthLoginCommand = cmd({
       google: 4,
       openrouter: 5,
       vercel: 6,
+      llmgateway: 7,
     }
     let provider = await prompts.autocomplete({
       message: "Select provider",
diff --git a/packages/opencode/src/provider/models.ts b/packages/opencode/src/provider/models.ts
index 796dcb7c238..b7f44cd39da 100644
--- a/packages/opencode/src/provider/models.ts
+++ b/packages/opencode/src/provider/models.ts
@@ -10,6 +10,41 @@ export namespace ModelsDev {
   const log = Log.create({ service: "models.dev" })
   const filepath = path.join(Global.Path.cache, "models.json")

+  // Built-in provider entries that should always be available in OpenCode's provider directory
+  // (even if models.dev doesn't list them).
+  const BUILTIN_PROVIDERS: Record<string, Provider> = {
+    llmgateway: {
+      id: "llmgateway",
+      name: "LLM Gateway",
+      api: "https://api.llmgateway.io/v1",
+      npm: "@llmgateway/ai-sdk-provider",
+      env: ["LLM_GATEWAY_API_KEY", "LLMGATEWAY_API_KEY"],
+      // Keep at least one model so the UI can select a default model for the provider.
+      models: {
+        "glm-4.7": {
+          id: "glm-4.7",
+          name: "GLM-4.7",
+          release_date: "2025-12-22",
+          attachment: false,
+          reasoning: true,
+          temperature: false,
+          tool_call: true,
+          interleaved: true,
+          modalities: { input: ["text"], output: ["text"] },
+          limit: { context: 200000, output: 128000 },
+          options: {},
+        },
+      },
+    },
+  }
+
+  function overlayBuiltins(providers: Record<string, Provider>) {
+    for (const [id, provider] of Object.entries(BUILTIN_PROVIDERS)) {
+      if (!providers[id]) providers[id] = provider
+    }
+    return providers
+  }
+
   export const Model = z.object({
     id: z.string(),
     name: z.string(),
@@ -79,9 +114,9 @@
     refresh()
     const file = Bun.file(filepath)
     const result = await file.json().catch(() => {})
-    if (result) return result as Record<string, Provider>
+    if (result) return overlayBuiltins(result as Record<string, Provider>)
     const json = await data()
-    return JSON.parse(json) as Record<string, Provider>
+    return overlayBuiltins(JSON.parse(json) as Record<string, Provider>)
   }

   export async function refresh() {
diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts
index 9b01eae9e9b..a74412bb10a 100644
--- a/packages/opencode/src/provider/provider.ts
+++ b/packages/opencode/src/provider/provider.ts
@@ -35,11 +35,29 @@ import { createGateway } from "@ai-sdk/gateway"
 import { createTogetherAI } from "@ai-sdk/togetherai"
 import { createPerplexity } from "@ai-sdk/perplexity"
 import { createVercel } from "@ai-sdk/vercel"
+import { createLLMGateway } from "@llmgateway/ai-sdk-provider"
 import { ProviderTransform } from "./transform"

 export namespace Provider {
   const log = Log.create({ service: "provider" })

+  // @llmgateway/ai-sdk-provider's return type doesn't currently satisfy `ai`'s Provider interface,
+  // even on versions that peer with ai@5. OpenCode does use image/embedding models in some flows,
+  // so we provide those via the OpenAI-compatible provider as a fallback, while keeping LLM Gateway
+  // for language models.
+  const createLLMGatewayAdapter: (options: any) => SDK = (options) => {
+    const llmgw: any = createLLMGateway(options)
+    const compat: any = createOpenAICompatible(options)
+    return {
+      // Prefer LLM Gateway for language models (routing/headers), but keep compat defaults.
+      ...compat,
+      ...llmgw,
+      // Ensure required Provider surface exists.
+      imageModel: llmgw.imageModel ?? compat.imageModel,
+      textEmbeddingModel: llmgw.textEmbeddingModel ?? compat.textEmbeddingModel,
+    } as SDK
+  }
+
   const BUNDLED_PROVIDERS: Record<string, (options: any) => SDK> = {
     "@ai-sdk/amazon-bedrock": createAmazonBedrock,
     "@ai-sdk/anthropic": createAnthropic,
@@ -50,6 +68,7 @@ export namespace Provider {
     "@ai-sdk/openai": createOpenAI,
     "@ai-sdk/openai-compatible": createOpenAICompatible,
     "@openrouter/ai-sdk-provider": createOpenRouter,
+    "@llmgateway/ai-sdk-provider": createLLMGatewayAdapter,
     "@ai-sdk/xai": createXai,
     "@ai-sdk/mistral": createMistral,
     "@ai-sdk/groq": createGroq,
@@ -311,6 +330,17 @@
        },
      }
    },
+    llmgateway: async () => {
+      return {
+        autoload: false,
+        options: {
+          headers: {
+            "HTTP-Referer": "https://opencode.ai/",
+            "X-Title": "opencode",
+          },
+        },
+      }
+    },
    vercel: async () => {
      return {
        autoload: false,
diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts
index 38b2c9aa13e..9a468d4371a 100644
--- a/packages/opencode/src/provider/transform.ts
+++ b/packages/opencode/src/provider/transform.ts
@@ -441,7 +441,7 @@
   ): Record<string, any> {
     const result: Record<string, any> = {}

-    if (model.api.npm === "@openrouter/ai-sdk-provider") {
+    if (model.api.npm === "@openrouter/ai-sdk-provider" || model.api.npm === "@llmgateway/ai-sdk-provider") {
       result["usage"] = {
         include: true,
       }
@@ -506,7 +506,7 @@
       }
       return { thinkingConfig: { thinkingBudget: 0 } }
     }
-    if (model.providerID === "openrouter") {
+    if (model.providerID === "openrouter" || model.providerID === "llmgateway") {
       if (model.api.id.includes("google")) {
         return { reasoning: { enabled: false } }
       }
@@ -544,6 +544,10 @@
        return {
          ["openrouter" as string]: options,
        }
+      case "@llmgateway/ai-sdk-provider":
+        return {
+          ["llmgateway" as string]: options,
+        }
       default:
         return {
           [model.providerID]: options,
diff --git a/packages/opencode/test/preload.ts b/packages/opencode/test/preload.ts
index 35b0b6c7642..16789f89108 100644
--- a/packages/opencode/test/preload.ts
+++ b/packages/opencode/test/preload.ts
@@ -45,6 +45,7 @@
 delete process.env["AWS_PROFILE"]
 delete process.env["AWS_REGION"]
 delete process.env["AWS_BEARER_TOKEN_BEDROCK"]
 delete process.env["OPENROUTER_API_KEY"]
+delete process.env["LLM_GATEWAY_API_KEY"]
 delete process.env["GROQ_API_KEY"]
 delete process.env["MISTRAL_API_KEY"]
 delete process.env["PERPLEXITY_API_KEY"]
diff --git a/packages/ui/src/components/provider-icons/types.ts b/packages/ui/src/components/provider-icons/types.ts
index 89fbc0625f5..e6e3b751fe0 100644
--- a/packages/ui/src/components/provider-icons/types.ts
+++ b/packages/ui/src/components/provider-icons/types.ts
@@ -26,6 +26,7 @@ export const iconNames = [
   "perplexity",
   "ovhcloud",
   "openrouter",
+  "llmgateway",
   "opencode",
   "openai",
   "ollama-cloud",
diff --git a/packages/web/src/content/docs/providers.mdx b/packages/web/src/content/docs/providers.mdx
index 882500b96d7..5ce57c3f84a 100644
--- a/packages/web/src/content/docs/providers.mdx
+++ b/packages/web/src/content/docs/providers.mdx
@@ -1195,6 +1195,74 @@ OpenCode Zen is a list of tested and verified models provided by the OpenCode te

 ---

+### LLM Gateway
+
+1. Head over to the [LLM Gateway dashboard](https://llmgateway.io/dashboard), click **Create API Key**, and copy the key.
+
+2. Run the `/connect` command and search for LLM Gateway.
+
+   ```txt
+   /connect
+   ```
+
+3. Enter the API key for the provider.
+
+   ```txt
+   ┌ API key
+   │
+   │
+   └ enter
+   ```
+
+4. Many LLM Gateway models are preloaded by default. Run the `/models` command to select the one you want.
+
+   ```txt
+   /models
+   ```
+
+   You can also add models through your opencode config.
+
+   ```json title="opencode.json" {6}
+   {
+     "$schema": "https://opencode.ai/config.json",
+     "provider": {
+       "llmgateway": {
+         "models": {
+           "somecoolnewmodel": {}
+         }
+       }
+     }
+   }
+   ```
+
+5. You can also customize these models through your opencode config. Here's an example that sets display names for a few models.
+
+   ```json title="opencode.json"
+   {
+     "$schema": "https://opencode.ai/config.json",
+     "provider": {
+       "llmgateway": {
+         "models": {
+           "glm-4.7": {
+             "name": "GLM 4.7"
+           },
+           "gpt-5.2": {
+             "name": "GPT-5.2"
+           },
+           "google/gemini-2.5-pro": {
+             "name": "Gemini 2.5 Pro"
+           },
+           "anthropic/claude-3-5-sonnet-20241022": {
+             "name": "Claude 3.5 Sonnet"
+           }
+         }
+       }
+     }
+   }
+   ```
+
+---
+
 ### SAP AI Core

 SAP AI Core provides access to 40+ models from OpenAI, Anthropic, Google, Amazon, Meta, Mistral, and AI21 through a unified platform.
diff --git a/packages/web/src/content/docs/troubleshooting.mdx b/packages/web/src/content/docs/troubleshooting.mdx
index 6c857b7e3f2..e109ab34f6e 100644
--- a/packages/web/src/content/docs/troubleshooting.mdx
+++ b/packages/web/src/content/docs/troubleshooting.mdx
@@ -94,6 +94,7 @@ Examples:

 - `openai/gpt-4.1`
 - `openrouter/google/gemini-2.5-flash`
+- `llmgateway/glm-4.7`
 - `opencode/kimi-k2`

 To figure out what models you have access to, run `opencode models`
diff --git a/packages/web/src/content/docs/zen.mdx b/packages/web/src/content/docs/zen.mdx
index 1b2c9b091af..bfa569d83b5 100644
--- a/packages/web/src/content/docs/zen.mdx
+++ b/packages/web/src/content/docs/zen.mdx
@@ -29,7 +29,7 @@ configured very differently; so you get very different performance and quality.
 We tested a select group of models and providers that work well with OpenCode.
 :::

-So if you are using a model through something like OpenRouter, you can never be
+So if you are using a model through something like OpenRouter or LLM Gateway, you can never be
 sure if you are getting the best version of the model you want. To fix this, we
 did a couple of things:
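
A quick way to exercise the new provider outside OpenCode is to call it directly through the AI SDK. This is a minimal sketch, not part of the diff, assuming `@llmgateway/ai-sdk-provider@2.5.1` with `ai@5` installed, `LLM_GATEWAY_API_KEY` set, and the usual AI SDK provider conventions (a callable factory and an `apiKey` option — neither is shown in this diff):

```ts
import { generateText } from "ai"
import { createLLMGateway } from "@llmgateway/ai-sdk-provider"

// Mirror the headers the `llmgateway` loader in provider.ts passes through.
// The `apiKey` option is an assumption based on standard AI SDK provider factories.
const llmgateway = createLLMGateway({
  apiKey: process.env["LLM_GATEWAY_API_KEY"],
  headers: {
    "HTTP-Referer": "https://opencode.ai/",
    "X-Title": "opencode",
  },
})

// `glm-4.7` is the built-in default model registered in models.ts above.
const { text } = await generateText({
  model: llmgateway("glm-4.7"),
  prompt: "Reply with a single word: ready",
})

console.log(text)
```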