feat: add Ollama Cloud as a declarative provider (#8189)

This commit is contained in:
Vincenzo Palazzo 2026-04-09 22:19:27 +02:00 committed by GitHub
parent 31f5dcf00c
commit 8c3b5eb56a
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 20 additions and 2 deletions

View file

@@ -0,0 +1,13 @@
{
"name": "ollama_cloud",
"engine": "openai",
"display_name": "Ollama Cloud",
"description": "Access hosted models on ollama.com via OpenAI-compatible API",
"api_key_env": "OLLAMA_CLOUD_API_KEY",
"base_url": "https://ollama.com/v1/chat/completions",
"models": [],
"dynamic_models": true,
"headers": null,
"timeout_seconds": null,
"supports_streaming": true
}

View file

@@ -38,6 +38,7 @@ goose is compatible with a wide range of LLM providers, allowing you to choose a
| [LM Studio](https://lmstudio.ai/) | Run local models with LM Studio's OpenAI-compatible server. **Because this provider runs locally, you must first [download a model](#local-llms).** | None required. Connects to local server at `localhost:1234` by default. |
| [Mistral AI](https://mistral.ai/) | Provides access to Mistral models including general-purpose models, specialized coding models (Codestral), and multimodal models (Pixtral). | `MISTRAL_API_KEY` |
| [Ollama](https://ollama.com/) | Local model runner supporting Qwen, Llama, DeepSeek, and other open-source models. **Because this provider runs locally, you must first [download and run a model](#local-llms).** | `OLLAMA_HOST` |
| [Ollama Cloud](https://ollama.com/) | Access hosted models on ollama.com via OpenAI-compatible API. Requires an Ollama account and API key. | `OLLAMA_CLOUD_API_KEY` |
| [OpenAI](https://platform.openai.com/api-keys) | Provides gpt-4o, o1, and other advanced language models. Also supports OpenAI-compatible endpoints (e.g., self-hosted LLaMA, vLLM, KServe). **o1-mini and o1-preview are not supported because goose uses tool calling.** | `OPENAI_API_KEY`, `OPENAI_HOST` (optional), `OPENAI_ORGANIZATION` (optional), `OPENAI_PROJECT` (optional), `OPENAI_CUSTOM_HEADERS` (optional) |
| [OpenRouter](https://openrouter.ai/) | API gateway for unified access to various models with features like rate-limiting management. | `OPENROUTER_API_KEY` |
| [OVHcloud AI](https://www.ovhcloud.com/en/public-cloud/ai-endpoints/) | Provides access to open-source models including Qwen, Llama, Mistral, and DeepSeek through AI Endpoints service. | `OVHCLOUD_API_KEY` |
@@ -993,9 +994,10 @@ Here are some local providers we support:
6. Enter the host where your model is running
:::info Endpoint
For Ollama, if you don't provide a host, we set it to `localhost:11434`.
When constructing the URL, we prepend `http://` if the scheme is not `http` or `https`.
If you're running Ollama on a different server, you'll have to set `OLLAMA_HOST=http://{host}:{port}`.
For hosted models on ollama.com, use the **Ollama Cloud** provider instead.
:::
```

View file

@@ -44,6 +44,9 @@ export const configLabels: Record<string, string> = {
// ollama
OLLAMA_HOST: 'Ollama Host',
// ollama cloud
OLLAMA_CLOUD_API_KEY: 'Ollama Cloud API Key',
// azure openai
AZURE_OPENAI_API_KEY: 'Azure OpenAI API Key',
AZURE_OPENAI_ENDPOINT: 'Azure OpenAI Endpoint',