Add Ollama basic auth support

This commit is contained in:
rcourtman 2026-04-01 12:00:38 +01:00
parent 54c71c88c4
commit 2a99e03831
14 changed files with 509 additions and 61 deletions

View file

@ -296,7 +296,7 @@ Configure in the UI: **Settings → System → AI Assistant**
- **OpenAI**
- **DeepSeek**
- **Google Gemini**
- **Ollama** (self-hosted, with tool/function calling support)
- **Ollama** (self-hosted, with tool/function calling support and optional Basic Auth for reverse-proxied endpoints)
- **OpenAI-compatible base URL** (for providers that implement the OpenAI API shape)
### Models
@ -328,6 +328,7 @@ Config directory: `/etc/pulse` (systemd) or `/data` (Docker/Kubernetes)
### Testing
- Test provider connectivity: `POST /api/ai/test` and `POST /api/ai/test/{provider}`
- Ollama tests validate both the server endpoint and model availability, not just basic reachability
- List available models: `GET /api/ai/models`
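For illustration, a minimal Go sketch that stores Ollama Basic Auth credentials via the settings API and then runs the provider test (the Pulse base URL below is an assumption, and any authentication Pulse itself enforces is omitted):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	base := "http://localhost:7655" // assumed Pulse address; adjust for your deployment

	// Store the Ollama endpoint plus optional Basic Auth credentials (placeholder values).
	settings := map[string]any{
		"ollama_base_url": "https://ollama.example.com",
		"ollama_username": "pulse",
		"ollama_password": "secret",
	}
	body, _ := json.Marshal(settings)
	req, _ := http.NewRequest(http.MethodPut, base+"/api/settings/ai", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	putResp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	putResp.Body.Close()

	// Run the Ollama-specific test, which checks both reachability and model availability.
	resp, err := http.Post(base+"/api/ai/test/ollama", "application/json", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("ollama test:", resp.Status)
}
```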
---

View file

@ -87,6 +87,8 @@ export const AISettings: Component = () => {
const [setupProvider, setSetupProvider] = createSignal<'anthropic' | 'openai' | 'deepseek' | 'gemini' | 'ollama'>('anthropic');
const [setupApiKey, setSetupApiKey] = createSignal('');
const [setupOllamaUrl, setSetupOllamaUrl] = createSignal('http://localhost:11434');
const [setupOllamaUsername, setSetupOllamaUsername] = createSignal('');
const [setupOllamaPassword, setSetupOllamaPassword] = createSignal('');
const [setupSaving, setSetupSaving] = createSignal(false);
// UI state for collapsible sections - START COLLAPSED for compact view
@ -116,6 +118,9 @@ export const AISettings: Component = () => {
deepseekApiKey: '',
geminiApiKey: '',
ollamaBaseUrl: 'http://localhost:11434',
ollamaUsername: '',
ollamaPassword: '',
clearOllamaPassword: false,
openaiBaseUrl: '',
// Cost controls
costBudgetUSD30d: '',
@ -151,6 +156,9 @@ export const AISettings: Component = () => {
deepseekApiKey: '',
geminiApiKey: '',
ollamaBaseUrl: 'http://localhost:11434',
ollamaUsername: '',
ollamaPassword: '',
clearOllamaPassword: false,
openaiBaseUrl: '',
costBudgetUSD30d: '',
requestTimeoutSeconds: 300,
@ -182,6 +190,9 @@ export const AISettings: Component = () => {
deepseekApiKey: '',
geminiApiKey: '',
ollamaBaseUrl: data.ollama_base_url || 'http://localhost:11434',
ollamaUsername: data.ollama_username || '',
ollamaPassword: '',
clearOllamaPassword: false,
openaiBaseUrl: data.openai_base_url || '',
costBudgetUSD30d:
typeof data.cost_budget_usd_30d === 'number' && data.cost_budget_usd_30d > 0
@ -509,6 +520,14 @@ export const AISettings: Component = () => {
if (form.ollamaBaseUrl.trim() && form.ollamaBaseUrl.trim() !== (settings()?.ollama_base_url || '')) {
payload.ollama_base_url = form.ollamaBaseUrl.trim();
}
if (form.ollamaUsername.trim() !== (settings()?.ollama_username || '')) {
payload.ollama_username = form.ollamaUsername.trim();
}
if (form.ollamaPassword !== '') {
payload.ollama_password = form.ollamaPassword;
} else if (form.clearOllamaPassword) {
payload.clear_ollama_password = true;
}
if (form.openaiBaseUrl !== (settings()?.openai_base_url || '')) {
payload.openai_base_url = form.openaiBaseUrl.trim();
}
@ -642,7 +661,11 @@ export const AISettings: Component = () => {
if (provider === 'openai') clearPayload.clear_openai_key = true;
if (provider === 'deepseek') clearPayload.clear_deepseek_key = true;
if (provider === 'gemini') clearPayload.clear_gemini_key = true;
if (provider === 'ollama') clearPayload.clear_ollama_url = true;
if (provider === 'ollama') {
clearPayload.clear_ollama_url = true;
clearPayload.clear_ollama_username = true;
clearPayload.clear_ollama_password = true;
}
await AIAPI.updateSettings(clearPayload);
@ -655,7 +678,12 @@ export const AISettings: Component = () => {
if (provider === 'openai') setForm('openaiApiKey', '');
if (provider === 'deepseek') setForm('deepseekApiKey', '');
if (provider === 'gemini') setForm('geminiApiKey', '');
if (provider === 'ollama') setForm('ollamaBaseUrl', '');
if (provider === 'ollama') {
setForm('ollamaBaseUrl', '');
setForm('ollamaUsername', '');
setForm('ollamaPassword', '');
setForm('clearOllamaPassword', false);
}
notificationStore.success(`${provider} credentials cleared`);
// Notify other components (like AIChat) that settings changed
@ -697,7 +725,13 @@ export const AISettings: Component = () => {
action={
(() => {
const s = settings();
const hasConfiguredProvider = s && (s.anthropic_configured || s.openai_configured || s.deepseek_configured || s.ollama_configured);
const hasConfiguredProvider = s && (
s.anthropic_configured ||
s.openai_configured ||
s.deepseek_configured ||
s.gemini_configured ||
s.ollama_configured
);
return (
<Toggle
@ -1293,10 +1327,66 @@ export const AISettings: Component = () => {
disabled={saving()}
/>
</div>
<div class="grid grid-cols-1 md:grid-cols-2 gap-2">
<div class="space-y-1">
<label class="text-xs text-gray-600 dark:text-gray-400">
Username
</label>
<input
type="text"
value={form.ollamaUsername}
onInput={(e) => setForm('ollamaUsername', e.currentTarget.value)}
placeholder="Optional"
class={controlClass()}
disabled={saving()}
/>
</div>
<div class="space-y-1">
<div class="flex items-center justify-between gap-2">
<label class="text-xs text-gray-600 dark:text-gray-400">
Password
</label>
<Show when={settings()?.ollama_password_set && !form.clearOllamaPassword}>
<button
type="button"
class="text-[11px] text-orange-600 dark:text-orange-400 hover:underline"
onClick={() => {
setForm('ollamaPassword', '');
setForm('clearOllamaPassword', true);
}}
disabled={saving()}
>
Clear saved password
</button>
</Show>
</div>
<input
type="password"
value={form.ollamaPassword}
onInput={(e) => {
setForm('ollamaPassword', e.currentTarget.value);
if (form.clearOllamaPassword) setForm('clearOllamaPassword', false);
}}
placeholder={settings()?.ollama_password_set && !form.clearOllamaPassword ? 'Saved password' : 'Optional'}
class={controlClass()}
disabled={saving()}
/>
<Show when={settings()?.ollama_password_set && !form.ollamaPassword && !form.clearOllamaPassword}>
<p class="text-[11px] text-gray-500">
A password is currently stored.
</p>
</Show>
<Show when={form.clearOllamaPassword}>
<p class="text-[11px] text-orange-600 dark:text-orange-400">
Saved password will be cleared on save.
</p>
</Show>
</div>
</div>
<div class="flex items-center justify-between">
<p class="text-xs text-gray-500">
<a href="https://ollama.ai" target="_blank" rel="noopener" class="text-blue-600 dark:text-blue-400 hover:underline">Learn about Ollama </a>
<span class="text-gray-400"> · Free & local</span>
<span class="text-gray-400"> · Optional Basic Auth for Ollama behind a reverse proxy</span>
</p>
<Show when={settings()?.ollama_configured}>
<div class="flex gap-1">
@ -1884,8 +1974,34 @@ export const AISettings: Component = () => {
placeholder="http://localhost:11434"
class="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-700 focus:ring-2 focus:ring-purple-500 focus:border-transparent"
/>
<div class="grid grid-cols-1 md:grid-cols-2 gap-3 mt-3">
<div>
<label class="block text-sm font-medium text-gray-700 dark:text-gray-300 mb-1.5">
Username
</label>
<input
type="text"
value={setupOllamaUsername()}
onInput={(e) => setSetupOllamaUsername(e.currentTarget.value)}
placeholder="Optional"
class="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-700 focus:ring-2 focus:ring-purple-500 focus:border-transparent"
/>
</div>
<div>
<label class="block text-sm font-medium text-gray-700 dark:text-gray-300 mb-1.5">
Password
</label>
<input
type="password"
value={setupOllamaPassword()}
onInput={(e) => setSetupOllamaPassword(e.currentTarget.value)}
placeholder="Optional"
class="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-700 focus:ring-2 focus:ring-purple-500 focus:border-transparent"
/>
</div>
</div>
<p class="text-xs text-gray-500 mt-1.5">
Ollama runs locally - no API key needed
Ollama runs locally by default. Set a username and password only if your endpoint is behind Basic Auth.
</p>
</div>
</Show>
@ -1898,6 +2014,8 @@ export const AISettings: Component = () => {
onClick={() => {
setShowSetupModal(false);
setSetupApiKey('');
setSetupOllamaUsername('');
setSetupOllamaPassword('');
}}
class="px-4 py-2 text-gray-700 dark:text-gray-300 hover:bg-gray-100 dark:hover:bg-gray-700 rounded-lg"
disabled={setupSaving()}
@ -1945,6 +2063,12 @@ export const AISettings: Component = () => {
return;
}
payload.ollama_base_url = setupOllamaUrl().trim();
if (setupOllamaUsername().trim()) {
payload.ollama_username = setupOllamaUsername().trim();
}
if (setupOllamaPassword() !== '') {
payload.ollama_password = setupOllamaPassword();
}
payload.model = 'ollama:llama3.2:latest';
}
@ -1954,6 +2078,8 @@ export const AISettings: Component = () => {
resetForm(updated);
setShowSetupModal(false);
setSetupApiKey('');
setSetupOllamaUsername('');
setSetupOllamaPassword('');
notificationStore.success('Pulse Assistant enabled! You can customize settings below.');
// Load models after setup
loadModels();

View file

@ -30,6 +30,7 @@ export const aiHelpContent: HelpContent[] = [
'Connect to a local or remote Ollama instance for Pulse Assistant and Patrol features.\n\n' +
'Ollama provides easy access to open-source models like Llama, Mistral, and CodeLlama ' +
'without requiring cloud API keys.\n\n' +
'If your Ollama endpoint is behind a reverse proxy, you can also store an optional Basic Auth username and password.\n\n' +
'Default: http://localhost:11434 (local Ollama installation)',
examples: [
'http://localhost:11434 (local)',

View file

@ -40,6 +40,8 @@ export interface AISettings {
gemini_configured: boolean; // true if Gemini API key is set
ollama_configured: boolean; // true (always available for attempt)
ollama_base_url: string; // Ollama server URL
ollama_username?: string; // Optional Basic Auth username for Ollama
ollama_password_set?: boolean; // true if an Ollama password is stored
openai_base_url?: string; // Custom OpenAI base URL
configured_providers: AIProvider[]; // List of providers with credentials
@ -82,6 +84,8 @@ export interface AISettingsUpdateRequest {
deepseek_api_key?: string; // Set DeepSeek API key
gemini_api_key?: string; // Set Gemini API key
ollama_base_url?: string; // Set Ollama server URL
ollama_username?: string; // Set Ollama Basic Auth username
ollama_password?: string; // Set Ollama Basic Auth password
openai_base_url?: string; // Set custom OpenAI base URL
// Clear flags for removing credentials
clear_anthropic_key?: boolean; // Clear Anthropic API key
@ -89,6 +93,8 @@ export interface AISettingsUpdateRequest {
clear_deepseek_key?: boolean; // Clear DeepSeek API key
clear_gemini_key?: boolean; // Clear Gemini API key
clear_ollama_url?: boolean; // Clear Ollama URL
clear_ollama_username?: boolean; // Clear Ollama Basic Auth username
clear_ollama_password?: boolean; // Clear Ollama Basic Auth password
// Cost controls
cost_budget_usd_30d?: number;

View file

@ -812,7 +812,7 @@ func (s *Service) createProviderForModel(modelStr string) (providers.StreamingPr
if baseURL == "" {
baseURL = "http://localhost:11434"
}
return providers.NewOllamaClient(modelName, baseURL, timeout), nil
return providers.NewOllamaClient(modelName, baseURL, s.cfg.OllamaUsername, s.cfg.OllamaPassword, timeout), nil
default:
return nil, fmt.Errorf("unsupported provider: %s", providerName)
}

View file

@ -2,6 +2,9 @@ package chat
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"github.com/rcourtman/pulse-go-rewrite/internal/ai/providers"
@ -123,6 +126,48 @@ func TestService_CreateProviderForModel(t *testing.T) {
}
}
func TestService_CreateProviderForModel_OllamaUsesConfiguredBasicAuth(t *testing.T) {
versionHits := 0
tagsHits := 0
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
username, password, ok := r.BasicAuth()
if !ok || username != "unai" || password != "secret" {
t.Fatalf("unexpected basic auth: ok=%v user=%q pass=%q", ok, username, password)
}
switch r.URL.Path {
case "/api/version":
versionHits++
_ = json.NewEncoder(w).Encode(map[string]any{"version": "0.1.0"})
case "/api/tags":
tagsHits++
_ = json.NewEncoder(w).Encode(map[string]any{"models": []map[string]any{{"name": "llama3:latest"}}})
default:
http.NotFound(w, r)
}
}))
defer server.Close()
svc := &Service{
cfg: &config.AIConfig{
OllamaBaseURL: server.URL,
OllamaUsername: "unai",
OllamaPassword: "secret",
},
}
provider, err := svc.createProviderForModel("ollama:llama3")
if err != nil {
t.Fatalf("expected ollama provider to be created: %v", err)
}
if err := provider.TestConnection(context.Background()); err != nil {
t.Fatalf("expected ollama provider test connection to succeed: %v", err)
}
if versionHits != 1 || tagsHits != 1 {
t.Fatalf("expected version+tags check, got version=%d tags=%d", versionHits, tagsHits)
}
}
func TestService_ExecutePatrolStream_Success(t *testing.T) {
store, err := NewSessionStore(t.TempDir())
if err != nil {

View file

@ -53,7 +53,7 @@ func NewFromConfig(cfg *config.AIConfig) (Provider, error) {
return NewOpenAIClient(cfg.APIKey, cfg.GetModel(), cfg.GetBaseURL(), timeout), nil
case config.AIProviderOllama:
return NewOllamaClient(cfg.GetModel(), cfg.GetBaseURL(), timeout), nil
return NewOllamaClient(cfg.GetModel(), cfg.GetBaseURL(), cfg.OllamaUsername, cfg.OllamaPassword, timeout), nil
case config.AIProviderDeepSeek:
if cfg.APIKey == "" {
@ -119,7 +119,7 @@ func NewForProvider(cfg *config.AIConfig, provider, model string) (Provider, err
case config.AIProviderOllama:
baseURL := cfg.GetBaseURLForProvider(config.AIProviderOllama)
return NewOllamaClient(model, baseURL, timeout), nil
return NewOllamaClient(model, baseURL, cfg.OllamaUsername, cfg.OllamaPassword, timeout), nil
case config.AIProviderGemini:
apiKey := cfg.GetAPIKeyForProvider(config.AIProviderGemini)

View file

@ -28,7 +28,7 @@ func getOllamaURL() string {
}
func TestIntegration_Ollama_TestConnection(t *testing.T) {
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
@ -41,7 +41,7 @@ func TestIntegration_Ollama_TestConnection(t *testing.T) {
}
func TestIntegration_Ollama_ListModels(t *testing.T) {
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
@ -62,7 +62,7 @@ func TestIntegration_Ollama_ListModels(t *testing.T) {
}
func TestIntegration_Ollama_SimpleChat(t *testing.T) {
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
@ -87,7 +87,7 @@ func TestIntegration_Ollama_SimpleChat(t *testing.T) {
}
func TestIntegration_Ollama_SystemPrompt(t *testing.T) {
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
@ -117,7 +117,7 @@ func TestIntegration_Ollama_SystemPrompt(t *testing.T) {
}
func TestIntegration_Ollama_MultiTurnConversation(t *testing.T) {
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
@ -156,7 +156,7 @@ func TestIntegration_Ollama_MultiTurnConversation(t *testing.T) {
}
func TestIntegration_Ollama_TokenCounting(t *testing.T) {
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
@ -184,7 +184,7 @@ func TestIntegration_Ollama_TokenCounting(t *testing.T) {
}
func TestIntegration_Ollama_ErrorHandling_BadModel(t *testing.T) {
client := providers.NewOllamaClient("nonexistent-model-12345", getOllamaURL(), 0)
client := providers.NewOllamaClient("nonexistent-model-12345", getOllamaURL(), "", "", 0)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
@ -203,7 +203,7 @@ func TestIntegration_Ollama_ErrorHandling_BadModel(t *testing.T) {
}
func TestIntegration_Ollama_Timeout(t *testing.T) {
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
// Very short timeout - should fail
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
@ -226,7 +226,7 @@ func TestIntegration_Ollama_Timeout(t *testing.T) {
// --- More useful tests below ---
func TestIntegration_Ollama_JSONOutput(t *testing.T) {
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
@ -268,7 +268,7 @@ func TestIntegration_Ollama_JSONOutput(t *testing.T) {
}
func TestIntegration_Ollama_LongResponse(t *testing.T) {
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
@ -293,7 +293,7 @@ func TestIntegration_Ollama_LongResponse(t *testing.T) {
}
func TestIntegration_Ollama_EmptyMessage(t *testing.T) {
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
@ -314,7 +314,7 @@ func TestIntegration_Ollama_EmptyMessage(t *testing.T) {
}
func TestIntegration_Ollama_SpecialCharacters(t *testing.T) {
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
@ -337,7 +337,7 @@ func TestIntegration_Ollama_SpecialCharacters(t *testing.T) {
}
func TestIntegration_Ollama_ConcurrentRequests(t *testing.T) {
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
const numRequests = 3
results := make(chan error, numRequests)
@ -373,7 +373,7 @@ func TestIntegration_Ollama_ConcurrentRequests(t *testing.T) {
func TestIntegration_Ollama_InfrastructureAnalysis(t *testing.T) {
// This simulates what Pulse actually does - send infrastructure context
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second)
defer cancel()
@ -417,7 +417,7 @@ What should I investigate first?`
}
func TestIntegration_Ollama_ModelName_Preserved(t *testing.T) {
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
@ -442,7 +442,7 @@ func TestIntegration_Ollama_ModelName_Preserved(t *testing.T) {
}
func TestIntegration_Ollama_StopReason(t *testing.T) {
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
@ -462,7 +462,7 @@ func TestIntegration_Ollama_StopReason(t *testing.T) {
}
func TestIntegration_Ollama_VeryLongInput(t *testing.T) {
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
@ -487,7 +487,7 @@ func TestIntegration_Ollama_VeryLongInput(t *testing.T) {
}
func TestIntegration_Ollama_RapidFireRequests(t *testing.T) {
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), 0)
client := providers.NewOllamaClient("tinyllama", getOllamaURL(), "", "", 0)
// Send 5 requests in rapid succession
for i := 0; i < 5; i++ {

View file

@ -15,13 +15,15 @@ import (
type OllamaClient struct {
model string
baseURL string
username string
password string
client *http.Client // For non-streaming requests (has overall timeout)
streamClient *http.Client // For streaming requests (no overall timeout — relies on context)
}
// NewOllamaClient creates a new Ollama API client
// timeout is optional - pass 0 to use the default 5 minute timeout
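// username and password are optional HTTP Basic Auth credentials - pass empty strings when the endpoint needs no auth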
func NewOllamaClient(model, baseURL string, timeout time.Duration) *OllamaClient {
func NewOllamaClient(model, baseURL, username, password string, timeout time.Duration) *OllamaClient {
if baseURL == "" {
baseURL = "http://localhost:11434"
}
@ -34,8 +36,10 @@ func NewOllamaClient(model, baseURL string, timeout time.Duration) *OllamaClient
timeout = 300 * time.Second // Default 5 minutes
}
return &OllamaClient{
model: model,
baseURL: baseURL,
model: model,
baseURL: baseURL,
username: username,
password: password,
client: &http.Client{
Timeout: timeout,
},
@ -51,6 +55,12 @@ func NewOllamaClient(model, baseURL string, timeout time.Duration) *OllamaClient
}
}
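// applyAuth attaches HTTP Basic Auth to the request when a username or password is configured.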
func (c *OllamaClient) applyAuth(req *http.Request) {
if c.username != "" || c.password != "" {
req.SetBasicAuth(c.username, c.password)
}
}
// Name returns the provider name
func (c *OllamaClient) Name() string {
return "ollama"
@ -223,6 +233,7 @@ func (c *OllamaClient) Chat(ctx context.Context, req ChatRequest) (*ChatResponse
}
httpReq.Header.Set("Content-Type", "application/json")
c.applyAuth(httpReq)
resp, err := c.client.Do(httpReq)
if err != nil {
@ -388,6 +399,7 @@ func (c *OllamaClient) ChatStream(ctx context.Context, req ChatRequest, callback
}
httpReq.Header.Set("Content-Type", "application/json")
c.applyAuth(httpReq)
// Use streamClient which has no overall timeout — http.Client.Timeout
// includes response body reading time, which kills slow streaming responses.
@ -483,6 +495,7 @@ func (c *OllamaClient) TestConnection(ctx context.Context) error {
if err != nil {
return fmt.Errorf("failed to create request: %w", err)
}
c.applyAuth(httpReq)
resp, err := c.client.Do(httpReq)
if err != nil {
@ -494,9 +507,76 @@ func (c *OllamaClient) TestConnection(ctx context.Context) error {
return fmt.Errorf("Ollama returned status %d", resp.StatusCode)
}
models, err := c.ListModels(ctx)
if err != nil {
return fmt.Errorf("connected to Ollama version endpoint but failed to list models: %w", err)
}
if c.model != "" && !ollamaModelAvailable(c.model, models) {
available := make([]string, 0, len(models))
for _, model := range models {
label := strings.TrimSpace(model.ID)
if label == "" {
label = strings.TrimSpace(model.Name)
}
if label != "" {
available = append(available, label)
}
}
if len(available) > 0 {
return fmt.Errorf("connected to Ollama but model %q is not available; found: %s", normalizeOllamaModelRef(c.model), strings.Join(available, ", "))
}
return fmt.Errorf("connected to Ollama but model %q is not available", normalizeOllamaModelRef(c.model))
}
return nil
}
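// normalizeOllamaModelRef trims whitespace and strips a leading "ollama:" prefix from a model reference.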
func normalizeOllamaModelRef(model string) string {
model = strings.TrimSpace(model)
if strings.HasPrefix(model, "ollama:") {
model = strings.TrimPrefix(model, "ollama:")
}
return model
}
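// splitOllamaModelRef splits a normalized model reference into name and tag; a reference without a tag returns an empty tag.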
func splitOllamaModelRef(model string) (string, string) {
model = normalizeOllamaModelRef(model)
if model == "" {
return "", ""
}
idx := strings.LastIndex(model, ":")
if idx == -1 {
return model, ""
}
return model[:idx], model[idx+1:]
}
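// ollamaModelAvailable reports whether the requested model matches one of the models returned by the server, treating a missing tag on either side as a wildcard.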
func ollamaModelAvailable(model string, available []ModelInfo) bool {
wantName, wantTag := splitOllamaModelRef(model)
if wantName == "" {
return len(available) > 0
}
for _, candidate := range available {
ref := strings.TrimSpace(candidate.ID)
if ref == "" {
ref = strings.TrimSpace(candidate.Name)
}
haveName, haveTag := splitOllamaModelRef(ref)
if haveName == "" {
continue
}
if wantName != haveName {
continue
}
if wantTag == "" || haveTag == "" || wantTag == haveTag {
return true
}
}
return false
}
// ListModels fetches available models from the local Ollama instance
func (c *OllamaClient) ListModels(ctx context.Context) ([]ModelInfo, error) {
url := c.baseURL + "/api/tags"
@ -504,6 +584,7 @@ func (c *OllamaClient) ListModels(ctx context.Context) ([]ModelInfo, error) {
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
c.applyAuth(httpReq)
resp, err := c.client.Do(httpReq)
if err != nil {

View file

@ -35,7 +35,7 @@ func TestOllamaClient_ChatStream_Success(t *testing.T) {
}))
defer server.Close()
client := NewOllamaClient("llama3", server.URL, 0)
client := NewOllamaClient("llama3", server.URL, "", "", 0)
var content string
var done bool
@ -78,7 +78,7 @@ func TestOllamaClient_ChatStream_ToolCall(t *testing.T) {
}))
defer server.Close()
client := NewOllamaClient("llama3", server.URL, 0)
client := NewOllamaClient("llama3", server.URL, "", "", 0)
var toolsFound []string
@ -111,7 +111,7 @@ func TestOllamaClient_ChatStream_Errors(t *testing.T) {
}))
defer server.Close()
client := NewOllamaClient("llama3", server.URL, 0)
client := NewOllamaClient("llama3", server.URL, "", "", 0)
err := client.ChatStream(context.Background(), ChatRequest{Messages: []Message{{Role: "user"}}}, func(e StreamEvent) {})
assert.Error(t, err)
@ -132,7 +132,7 @@ func TestOllamaClient_ListModels(t *testing.T) {
}))
defer server.Close()
client := NewOllamaClient("llama3", server.URL, 0)
client := NewOllamaClient("llama3", server.URL, "", "", 0)
models, err := client.ListModels(context.Background())
require.NoError(t, err)
@ -153,7 +153,7 @@ func TestNewOllamaClient_Normalization(t *testing.T) {
}
for _, tt := range tests {
client := NewOllamaClient("model", tt.input, 0)
client := NewOllamaClient("model", tt.input, "", "", 0)
assert.Equal(t, tt.expect, client.baseURL)
}
}
@ -186,7 +186,7 @@ func TestOllamaClient_Chat_Success(t *testing.T) {
}))
defer server.Close()
client := NewOllamaClient("llama3", server.URL, 0)
client := NewOllamaClient("llama3", server.URL, "", "", 0)
resp, err := client.Chat(context.Background(), ChatRequest{
Messages: []Message{{Role: "user", Content: "Hi"}},
Tools: []Tool{
@ -211,19 +211,63 @@ func TestOllamaClient_Chat_Success(t *testing.T) {
}
func TestOllamaClient_TestConnection(t *testing.T) {
versionHits := 0
tagsHits := 0
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, "/api/version", r.URL.Path)
w.WriteHeader(http.StatusOK)
username, password, ok := r.BasicAuth()
require.True(t, ok)
assert.Equal(t, "unai", username)
assert.Equal(t, "secret", password)
switch r.URL.Path {
case "/api/version":
versionHits++
_ = json.NewEncoder(w).Encode(map[string]string{"version": "0.1.0"})
case "/api/tags":
tagsHits++
_ = json.NewEncoder(w).Encode(map[string]any{
"models": []map[string]any{
{"name": "llama3:latest"},
},
})
default:
t.Fatalf("unexpected path %s", r.URL.Path)
}
}))
defer server.Close()
client := NewOllamaClient("llama3", server.URL, 0)
client := NewOllamaClient("ollama:llama3", server.URL, "unai", "secret", 0)
err := client.TestConnection(context.Background())
require.NoError(t, err)
assert.Equal(t, 1, versionHits)
assert.Equal(t, 1, tagsHits)
}
func TestOllamaClient_TestConnection_ModelMissing(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/api/version":
_ = json.NewEncoder(w).Encode(map[string]string{"version": "0.1.0"})
case "/api/tags":
_ = json.NewEncoder(w).Encode(map[string]any{
"models": []map[string]any{
{"name": "mistral:latest"},
},
})
default:
t.Fatalf("unexpected path %s", r.URL.Path)
}
}))
defer server.Close()
client := NewOllamaClient("ollama:llama3", server.URL, "", "", 0)
err := client.TestConnection(context.Background())
require.Error(t, err)
assert.Contains(t, err.Error(), `model "llama3" is not available`)
}
func TestOllamaClient_SupportsThinking(t *testing.T) {
client := NewOllamaClient("llama3", "http://localhost:11434", 0)
client := NewOllamaClient("llama3", "http://localhost:11434", "", "", 0)
if client.SupportsThinking("llama3") {
t.Fatal("expected SupportsThinking to be false")
}

View file

@ -739,12 +739,16 @@ func shouldRestartAIChat(req AISettingsUpdateRequest) bool {
req.DeepSeekAPIKey != nil ||
req.GeminiAPIKey != nil ||
req.OllamaBaseURL != nil ||
req.OllamaUsername != nil ||
req.OllamaPassword != nil ||
req.OpenAIBaseURL != nil ||
req.ClearAnthropicKey != nil ||
req.ClearOpenAIKey != nil ||
req.ClearDeepSeekKey != nil ||
req.ClearGeminiKey != nil ||
req.ClearOllamaURL != nil
req.ClearOllamaURL != nil ||
req.ClearOllamaUsername != nil ||
req.ClearOllamaPassword != nil
}
// SetOnControlSettingsChange sets a callback to be invoked when control settings change
@ -1029,6 +1033,8 @@ type AISettingsResponse struct {
GeminiConfigured bool `json:"gemini_configured"` // true if Gemini API key is set
OllamaConfigured bool `json:"ollama_configured"` // true (always available for attempt)
OllamaBaseURL string `json:"ollama_base_url"` // Ollama server URL
OllamaUsername string `json:"ollama_username,omitempty"` // Optional Basic Auth username for Ollama
OllamaPasswordSet bool `json:"ollama_password_set"` // true if an Ollama password is stored
OpenAIBaseURL string `json:"openai_base_url,omitempty"` // Custom OpenAI base URL
ConfiguredProviders []string `json:"configured_providers"` // List of provider names with credentials
// Cost controls
@ -1069,13 +1075,17 @@ type AISettingsUpdateRequest struct {
DeepSeekAPIKey *string `json:"deepseek_api_key,omitempty"` // Set DeepSeek API key
GeminiAPIKey *string `json:"gemini_api_key,omitempty"` // Set Gemini API key
OllamaBaseURL *string `json:"ollama_base_url,omitempty"` // Set Ollama server URL
OllamaUsername *string `json:"ollama_username,omitempty"` // Set Ollama Basic Auth username
OllamaPassword *string `json:"ollama_password,omitempty"` // Set Ollama Basic Auth password
OpenAIBaseURL *string `json:"openai_base_url,omitempty"` // Set custom OpenAI base URL
// Clear flags for removing credentials
ClearAnthropicKey *bool `json:"clear_anthropic_key,omitempty"` // Clear Anthropic API key
ClearOpenAIKey *bool `json:"clear_openai_key,omitempty"` // Clear OpenAI API key
ClearDeepSeekKey *bool `json:"clear_deepseek_key,omitempty"` // Clear DeepSeek API key
ClearGeminiKey *bool `json:"clear_gemini_key,omitempty"` // Clear Gemini API key
ClearOllamaURL *bool `json:"clear_ollama_url,omitempty"` // Clear Ollama URL
ClearAnthropicKey *bool `json:"clear_anthropic_key,omitempty"` // Clear Anthropic API key
ClearOpenAIKey *bool `json:"clear_openai_key,omitempty"` // Clear OpenAI API key
ClearDeepSeekKey *bool `json:"clear_deepseek_key,omitempty"` // Clear DeepSeek API key
ClearGeminiKey *bool `json:"clear_gemini_key,omitempty"` // Clear Gemini API key
ClearOllamaURL *bool `json:"clear_ollama_url,omitempty"` // Clear Ollama URL
ClearOllamaUsername *bool `json:"clear_ollama_username,omitempty"` // Clear Ollama Basic Auth username
ClearOllamaPassword *bool `json:"clear_ollama_password,omitempty"` // Clear Ollama Basic Auth password
// Cost controls
CostBudgetUSD30d *float64 `json:"cost_budget_usd_30d,omitempty"`
// Request timeout (seconds) - for slow hardware running local models
@ -1146,6 +1156,8 @@ func (h *AISettingsHandler) HandleGetAISettings(w http.ResponseWriter, r *http.R
GeminiConfigured: settings.HasProvider(config.AIProviderGemini),
OllamaConfigured: settings.HasProvider(config.AIProviderOllama),
OllamaBaseURL: settings.GetBaseURLForProvider(config.AIProviderOllama),
OllamaUsername: settings.OllamaUsername,
OllamaPasswordSet: settings.OllamaPassword != "",
OpenAIBaseURL: settings.OpenAIBaseURL,
ConfiguredProviders: settings.GetConfiguredProviders(),
CostBudgetUSD30d: settings.CostBudgetUSD30d,
@ -1321,6 +1333,16 @@ func (h *AISettingsHandler) HandleUpdateAISettings(w http.ResponseWriter, r *htt
} else if req.OllamaBaseURL != nil {
settings.OllamaBaseURL = strings.TrimSpace(*req.OllamaBaseURL)
}
if req.ClearOllamaUsername != nil && *req.ClearOllamaUsername {
settings.OllamaUsername = ""
} else if req.OllamaUsername != nil {
settings.OllamaUsername = strings.TrimSpace(*req.OllamaUsername)
}
if req.ClearOllamaPassword != nil && *req.ClearOllamaPassword {
settings.OllamaPassword = ""
} else if req.OllamaPassword != nil {
settings.OllamaPassword = *req.OllamaPassword
}
if req.OpenAIBaseURL != nil {
settings.OpenAIBaseURL = strings.TrimSpace(*req.OpenAIBaseURL)
}
@ -1563,6 +1585,8 @@ func (h *AISettingsHandler) HandleUpdateAISettings(w http.ResponseWriter, r *htt
GeminiConfigured: settings.HasProvider(config.AIProviderGemini),
OllamaConfigured: settings.HasProvider(config.AIProviderOllama),
OllamaBaseURL: settings.GetBaseURLForProvider(config.AIProviderOllama),
OllamaUsername: settings.OllamaUsername,
OllamaPasswordSet: settings.OllamaPassword != "",
OpenAIBaseURL: settings.OpenAIBaseURL,
ConfiguredProviders: settings.GetConfiguredProviders(),
RequestTimeoutSeconds: settings.RequestTimeoutSeconds,
@ -1694,8 +1718,10 @@ func (h *AISettingsHandler) HandleTestProvider(w http.ResponseWriter, r *http.Re
return
}
// Create provider and test connection
testProvider, err := providers.NewForProvider(cfg, provider, cfg.GetModel())
// Create provider and test connection using a model that belongs to the
// provider being tested, not necessarily the globally selected default model.
testModel := cfg.GetPreferredModelForProvider(provider)
testProvider, err := providers.NewForProvider(cfg, provider, testModel)
if err != nil {
testResult.Success = false
testResult.Message = fmt.Sprintf("Failed to create provider: %v", err)

View file

@ -59,10 +59,12 @@ func TestAISettingsHandler_GetAndUpdateSettings_RoundTrip(t *testing.T) {
// Update settings to enable AI via Ollama.
{
body, _ := json.Marshal(AISettingsUpdateRequest{
Enabled: ptr(true),
Provider: ptr("ollama"),
Model: ptr("ollama:llama3"),
OllamaBaseURL: ptr("http://localhost:11434"),
Enabled: ptr(true),
Provider: ptr("ollama"),
Model: ptr("ollama:llama3"),
OllamaBaseURL: ptr("http://localhost:11434"),
OllamaUsername: ptr("unai"),
OllamaPassword: ptr("secret"),
})
req := httptest.NewRequest(http.MethodPut, "/api/settings/ai", bytes.NewReader(body))
rec := httptest.NewRecorder()
@ -82,6 +84,9 @@ func TestAISettingsHandler_GetAndUpdateSettings_RoundTrip(t *testing.T) {
if resp.OllamaBaseURL != "http://localhost:11434" {
t.Fatalf("unexpected ollama base url: %+v", resp)
}
if resp.OllamaUsername != "unai" || !resp.OllamaPasswordSet {
t.Fatalf("expected ollama auth state in response, got %+v", resp)
}
}
// GET again should reflect persisted updates.
@ -101,6 +106,9 @@ func TestAISettingsHandler_GetAndUpdateSettings_RoundTrip(t *testing.T) {
if !resp.Enabled || !resp.OllamaConfigured {
t.Fatalf("expected enabled + ollama configured, got %+v", resp)
}
if resp.OllamaUsername != "unai" || !resp.OllamaPasswordSet {
t.Fatalf("expected persisted ollama auth state, got %+v", resp)
}
}
}
@ -234,12 +242,23 @@ func ptr[T any](v T) *T { return &v }
func TestAISettingsHandler_TestConnection_Ollama(t *testing.T) {
t.Parallel()
versionHits := 0
tagsHits := 0
ollama := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/api/version" {
http.NotFound(w, r)
return
username, password, ok := r.BasicAuth()
if !ok || username != "unai" || password != "secret" {
t.Fatalf("unexpected basic auth: ok=%v user=%q pass=%q", ok, username, password)
}
switch r.URL.Path {
case "/api/version":
versionHits++
_ = json.NewEncoder(w).Encode(map[string]any{"version": "0.1.0"})
case "/api/tags":
tagsHits++
_ = json.NewEncoder(w).Encode(map[string]any{"models": []map[string]any{{"name": "llama3:latest"}}})
default:
http.NotFound(w, r)
}
_ = json.NewEncoder(w).Encode(map[string]any{"version": "0.1.0"})
}))
defer ollama.Close()
@ -251,6 +270,8 @@ func TestAISettingsHandler_TestConnection_Ollama(t *testing.T) {
aiCfg.Enabled = true
aiCfg.Model = "ollama:llama3"
aiCfg.OllamaBaseURL = ollama.URL
aiCfg.OllamaUsername = "unai"
aiCfg.OllamaPassword = "secret"
if err := persistence.SaveAIConfig(*aiCfg); err != nil {
t.Fatalf("SaveAIConfig: %v", err)
}
@ -275,17 +296,31 @@ func TestAISettingsHandler_TestConnection_Ollama(t *testing.T) {
if !resp.Success {
t.Fatalf("expected success, got %+v", resp)
}
if versionHits != 1 || tagsHits != 1 {
t.Fatalf("expected version+tags check, got version=%d tags=%d", versionHits, tagsHits)
}
}
func TestAISettingsHandler_TestProvider_Ollama(t *testing.T) {
t.Parallel()
versionHits := 0
tagsHits := 0
ollama := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/api/version" {
http.NotFound(w, r)
return
username, password, ok := r.BasicAuth()
if !ok || username != "unai" || password != "secret" {
t.Fatalf("unexpected basic auth: ok=%v user=%q pass=%q", ok, username, password)
}
switch r.URL.Path {
case "/api/version":
versionHits++
_ = json.NewEncoder(w).Encode(map[string]any{"version": "0.1.0"})
case "/api/tags":
tagsHits++
_ = json.NewEncoder(w).Encode(map[string]any{"models": []map[string]any{{"name": "llama3:latest"}}})
default:
http.NotFound(w, r)
}
_ = json.NewEncoder(w).Encode(map[string]any{"version": "0.1.0"})
}))
defer ollama.Close()
@ -295,8 +330,11 @@ func TestAISettingsHandler_TestProvider_Ollama(t *testing.T) {
aiCfg := config.NewDefaultAIConfig()
aiCfg.Enabled = true
aiCfg.Model = "ollama:llama3"
aiCfg.Model = "openai:gpt-4o"
aiCfg.PatrolModel = "ollama:llama3"
aiCfg.OllamaBaseURL = ollama.URL
aiCfg.OllamaUsername = "unai"
aiCfg.OllamaPassword = "secret"
if err := persistence.SaveAIConfig(*aiCfg); err != nil {
t.Fatalf("SaveAIConfig: %v", err)
}
@ -321,6 +359,9 @@ func TestAISettingsHandler_TestProvider_Ollama(t *testing.T) {
if !resp.Success || resp.Provider != "ollama" {
t.Fatalf("unexpected response: %+v", resp)
}
if versionHits != 1 || tagsHits != 1 {
t.Fatalf("expected version+tags check, got version=%d tags=%d", versionHits, tagsHits)
}
}
// ========================================

View file

@ -35,6 +35,8 @@ type AIConfig struct {
DeepSeekAPIKey string `json:"deepseek_api_key,omitempty"` // DeepSeek API key
GeminiAPIKey string `json:"gemini_api_key,omitempty"` // Google Gemini API key
OllamaBaseURL string `json:"ollama_base_url,omitempty"` // Ollama server URL (default: http://localhost:11434)
OllamaUsername string `json:"ollama_username,omitempty"` // Optional Basic Auth username for Ollama
OllamaPassword string `json:"ollama_password,omitempty"` // Optional Basic Auth password for Ollama
OpenAIBaseURL string `json:"openai_base_url,omitempty"` // Custom OpenAI-compatible base URL (optional)
// OAuth fields for Claude Pro/Max subscription authentication
@ -412,6 +414,24 @@ func (c *AIConfig) GetModel() string {
}
}
// GetPreferredModelForProvider returns the most relevant configured model for a provider.
// It prefers explicitly selected models for that provider before falling back to the
// provider's default model string.
func (c *AIConfig) GetPreferredModelForProvider(provider string) string {
for _, candidate := range []string{c.Model, c.ChatModel, c.PatrolModel, c.AutoFixModel, c.DiscoveryModel} {
candidate = strings.TrimSpace(candidate)
if candidate == "" {
continue
}
candidateProvider, _ := ParseModelString(candidate)
if candidateProvider == provider {
return candidate
}
}
return DefaultModelForProvider(provider)
}
// GetChatModel returns the model for interactive chat conversations
// Falls back to the main Model if ChatModel is not set
func (c *AIConfig) GetChatModel() string {

View file

@ -628,6 +628,63 @@ func TestAIConfig_GetChatModel(t *testing.T) {
})
}
func TestAIConfig_GetPreferredModelForProvider(t *testing.T) {
tests := []struct {
name string
config AIConfig
provider string
expected string
}{
{
name: "uses main model when provider matches",
config: AIConfig{
Model: "ollama:llama3.2",
},
provider: AIProviderOllama,
expected: "ollama:llama3.2",
},
{
name: "falls back to patrol override for provider",
config: AIConfig{
Model: "openai:gpt-4o",
PatrolModel: "ollama:qwen2.5",
},
provider: AIProviderOllama,
expected: "ollama:qwen2.5",
},
{
name: "detects unprefixed ollama model",
config: AIConfig{
PatrolModel: "llama3.1",
},
provider: AIProviderOllama,
expected: "llama3.1",
},
{
name: "falls back to provider default when no model matches",
config: AIConfig{
Model: "openai:gpt-4o",
},
provider: AIProviderGemini,
expected: FormatModelString(AIProviderGemini, DefaultAIModelGemini),
},
{
name: "unknown provider returns empty",
config: AIConfig{Model: "openai:gpt-4o"},
provider: "unknown",
expected: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.config.GetPreferredModelForProvider(tt.provider); got != tt.expected {
t.Fatalf("GetPreferredModelForProvider(%q) = %q, want %q", tt.provider, got, tt.expected)
}
})
}
}
func TestAIConfig_GetPatrolModel(t *testing.T) {
t.Run("explicit patrol model", func(t *testing.T) {
config := AIConfig{