mirror of
https://github.com/block/goose.git
synced 2026-04-28 03:29:36 +00:00
feat: add Ollama Cloud as a declarative provider (#8189)
This commit is contained in:
parent
31f5dcf00c
commit
8c3b5eb56a
3 changed files with 20 additions and 2 deletions
13
crates/goose/src/providers/declarative/ollama_cloud.json
Normal file
13
crates/goose/src/providers/declarative/ollama_cloud.json
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
{
|
||||
"name": "ollama_cloud",
|
||||
"engine": "openai",
|
||||
"display_name": "Ollama Cloud",
|
||||
"description": "Access hosted models on ollama.com via an OpenAI-compatible API",
|
||||
"api_key_env": "OLLAMA_CLOUD_API_KEY",
|
||||
"base_url": "https://ollama.com/v1/chat/completions",
|
||||
"models": [],
|
||||
"dynamic_models": true,
|
||||
"headers": null,
|
||||
"timeout_seconds": null,
|
||||
"supports_streaming": true
|
||||
}
|
||||
|
|
@ -38,6 +38,7 @@ goose is compatible with a wide range of LLM providers, allowing you to choose a
|
|||
| [LM Studio](https://lmstudio.ai/) | Run local models with LM Studio's OpenAI-compatible server. **Because this provider runs locally, you must first [download a model](#local-llms).** | None required. Connects to local server at `localhost:1234` by default. |
|
||||
| [Mistral AI](https://mistral.ai/) | Provides access to Mistral models including general-purpose models, specialized coding models (Codestral), and multimodal models (Pixtral). | `MISTRAL_API_KEY` |
|
||||
| [Ollama](https://ollama.com/) | Local model runner supporting Qwen, Llama, DeepSeek, and other open-source models. **Because this provider runs locally, you must first [download and run a model](#local-llms).** | `OLLAMA_HOST` |
|
||||
| [Ollama Cloud](https://ollama.com/) | Access hosted models on ollama.com via an OpenAI-compatible API. Requires an Ollama account and API key. | `OLLAMA_CLOUD_API_KEY` |
|
||||
| [OpenAI](https://platform.openai.com/api-keys) | Provides gpt-4o, o1, and other advanced language models. Also supports OpenAI-compatible endpoints (e.g., self-hosted LLaMA, vLLM, KServe). **o1-mini and o1-preview are not supported because goose uses tool calling.** | `OPENAI_API_KEY`, `OPENAI_HOST` (optional), `OPENAI_ORGANIZATION` (optional), `OPENAI_PROJECT` (optional), `OPENAI_CUSTOM_HEADERS` (optional) |
|
||||
| [OpenRouter](https://openrouter.ai/) | API gateway for unified access to various models with features like rate-limiting management. | `OPENROUTER_API_KEY` |
|
||||
| [OVHcloud AI](https://www.ovhcloud.com/en/public-cloud/ai-endpoints/) | Provides access to open-source models including Qwen, Llama, Mistral, and DeepSeek through AI Endpoints service. | `OVHCLOUD_API_KEY` |
|
||||
|
|
@ -993,9 +994,10 @@ Here are some local providers we support:
|
|||
6. Enter the host where your model is running
|
||||
|
||||
:::info Endpoint
|
||||
For Ollama, if you don't provide a host, we set it to `localhost:11434`.
|
||||
When constructing the URL, we prepend `http://` if the scheme is not `http` or `https`.
|
||||
If you're running Ollama on a different server, you'll have to set `OLLAMA_HOST=http://{host}:{port}`.
|
||||
For hosted models on ollama.com, use the **Ollama Cloud** provider instead.
|
||||
:::
|
||||
|
||||
```
|
||||
|
|
|
|||
|
|
@ -44,6 +44,9 @@ export const configLabels: Record<string, string> = {
|
|||
// ollama
|
||||
OLLAMA_HOST: 'Ollama Host',
|
||||
|
||||
// ollama cloud
|
||||
OLLAMA_CLOUD_API_KEY: 'Ollama Cloud API Key',
|
||||
|
||||
// azure openai
|
||||
AZURE_OPENAI_API_KEY: 'Azure OpenAI API Key',
|
||||
AZURE_OPENAI_ENDPOINT: 'Azure OpenAI Endpoint',
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue