mirror of
https://github.com/lfnovo/open-notebook.git
synced 2026-05-05 23:37:58 +00:00
Version 1 (#160)
New front-end Launch Chat API Manage Sources Enable re-embedding of all contents Sources can be added without a notebook now Improved settings Enable model selector on all chats Background processing for better experience Dark mode Improved Notes Improved Docs: - Remove all Streamlit references from documentation - Update deployment guides with React frontend setup - Fix Docker environment variables format (SURREAL_URL, SURREAL_PASSWORD) - Update docker image tag from :latest to :v1-latest - Change navigation references (Settings → Models to just Models) - Update development setup to include frontend npm commands - Add MIGRATION.md guide for users upgrading from Streamlit - Update quick-start guide with correct environment variables - Add port 5055 documentation for API access - Update project structure to reflect frontend/ directory - Remove outdated source-chat documentation files
This commit is contained in:
parent
124d7d110c
commit
b7e656a319
319 changed files with 46747 additions and 7408 deletions
|
|
@@ -1,11 +1,18 @@
|
|||
import os
|
||||
from typing import List, Optional
|
||||
|
||||
from esperanto import AIFactory
|
||||
from fastapi import APIRouter, HTTPException, Query
|
||||
from loguru import logger
|
||||
|
||||
from api.models import DefaultModelsResponse, ModelCreate, ModelResponse
|
||||
from api.models import (
|
||||
DefaultModelsResponse,
|
||||
ModelCreate,
|
||||
ModelResponse,
|
||||
ProviderAvailabilityResponse,
|
||||
)
|
||||
from open_notebook.domain.models import DefaultModels, Model
|
||||
from open_notebook.exceptions import DatabaseOperationError, InvalidInputError
|
||||
from open_notebook.exceptions import InvalidInputError
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
|
@@ -57,7 +64,7 @@ async def create_model(model_data: ModelCreate):
|
|||
await new_model.save()
|
||||
|
||||
return ModelResponse(
|
||||
id=new_model.id,
|
||||
id=new_model.id or "",
|
||||
name=new_model.name,
|
||||
provider=new_model.provider,
|
||||
type=new_model.type,
|
||||
|
|
@@ -94,15 +101,15 @@ async def get_default_models():
|
|||
"""Get default model assignments."""
|
||||
try:
|
||||
defaults = await DefaultModels.get_instance()
|
||||
|
||||
|
||||
return DefaultModelsResponse(
|
||||
default_chat_model=defaults.default_chat_model,
|
||||
default_transformation_model=defaults.default_transformation_model,
|
||||
large_context_model=defaults.large_context_model,
|
||||
default_text_to_speech_model=defaults.default_text_to_speech_model,
|
||||
default_speech_to_text_model=defaults.default_speech_to_text_model,
|
||||
default_embedding_model=defaults.default_embedding_model,
|
||||
default_tools_model=defaults.default_tools_model,
|
||||
default_chat_model=defaults.default_chat_model, # type: ignore[attr-defined]
|
||||
default_transformation_model=defaults.default_transformation_model, # type: ignore[attr-defined]
|
||||
large_context_model=defaults.large_context_model, # type: ignore[attr-defined]
|
||||
default_text_to_speech_model=defaults.default_text_to_speech_model, # type: ignore[attr-defined]
|
||||
default_speech_to_text_model=defaults.default_speech_to_text_model, # type: ignore[attr-defined]
|
||||
default_embedding_model=defaults.default_embedding_model, # type: ignore[attr-defined]
|
||||
default_tools_model=defaults.default_tools_model, # type: ignore[attr-defined]
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching default models: {str(e)}")
|
||||
|
|
@@ -117,19 +124,19 @@ async def update_default_models(defaults_data: DefaultModelsResponse):
|
|||
|
||||
# Update only provided fields
|
||||
if defaults_data.default_chat_model is not None:
|
||||
defaults.default_chat_model = defaults_data.default_chat_model
|
||||
defaults.default_chat_model = defaults_data.default_chat_model # type: ignore[attr-defined]
|
||||
if defaults_data.default_transformation_model is not None:
|
||||
defaults.default_transformation_model = defaults_data.default_transformation_model
|
||||
defaults.default_transformation_model = defaults_data.default_transformation_model # type: ignore[attr-defined]
|
||||
if defaults_data.large_context_model is not None:
|
||||
defaults.large_context_model = defaults_data.large_context_model
|
||||
defaults.large_context_model = defaults_data.large_context_model # type: ignore[attr-defined]
|
||||
if defaults_data.default_text_to_speech_model is not None:
|
||||
defaults.default_text_to_speech_model = defaults_data.default_text_to_speech_model
|
||||
defaults.default_text_to_speech_model = defaults_data.default_text_to_speech_model # type: ignore[attr-defined]
|
||||
if defaults_data.default_speech_to_text_model is not None:
|
||||
defaults.default_speech_to_text_model = defaults_data.default_speech_to_text_model
|
||||
defaults.default_speech_to_text_model = defaults_data.default_speech_to_text_model # type: ignore[attr-defined]
|
||||
if defaults_data.default_embedding_model is not None:
|
||||
defaults.default_embedding_model = defaults_data.default_embedding_model
|
||||
defaults.default_embedding_model = defaults_data.default_embedding_model # type: ignore[attr-defined]
|
||||
if defaults_data.default_tools_model is not None:
|
||||
defaults.default_tools_model = defaults_data.default_tools_model
|
||||
defaults.default_tools_model = defaults_data.default_tools_model # type: ignore[attr-defined]
|
||||
|
||||
await defaults.update()
|
||||
|
||||
|
|
@@ -138,16 +145,74 @@ async def update_default_models(defaults_data: DefaultModelsResponse):
|
|||
await model_manager.refresh_defaults()
|
||||
|
||||
return DefaultModelsResponse(
|
||||
default_chat_model=defaults.default_chat_model,
|
||||
default_transformation_model=defaults.default_transformation_model,
|
||||
large_context_model=defaults.large_context_model,
|
||||
default_text_to_speech_model=defaults.default_text_to_speech_model,
|
||||
default_speech_to_text_model=defaults.default_speech_to_text_model,
|
||||
default_embedding_model=defaults.default_embedding_model,
|
||||
default_tools_model=defaults.default_tools_model,
|
||||
default_chat_model=defaults.default_chat_model, # type: ignore[attr-defined]
|
||||
default_transformation_model=defaults.default_transformation_model, # type: ignore[attr-defined]
|
||||
large_context_model=defaults.large_context_model, # type: ignore[attr-defined]
|
||||
default_text_to_speech_model=defaults.default_text_to_speech_model, # type: ignore[attr-defined]
|
||||
default_speech_to_text_model=defaults.default_speech_to_text_model, # type: ignore[attr-defined]
|
||||
default_embedding_model=defaults.default_embedding_model, # type: ignore[attr-defined]
|
||||
default_tools_model=defaults.default_tools_model, # type: ignore[attr-defined]
|
||||
)
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error updating default models: {str(e)}")
|
||||
raise HTTPException(status_code=500, detail=f"Error updating default models: {str(e)}")
|
||||
raise HTTPException(status_code=500, detail=f"Error updating default models: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/models/providers", response_model=ProviderAvailabilityResponse)
async def get_provider_availability():
    """Report which AI providers are configured via environment variables.

    Returns:
        ProviderAvailabilityResponse with:
          - available: providers whose required env var(s) are set to a
            non-empty value,
          - unavailable: all remaining known providers,
          - supported_types: for each available provider, the model types
            Esperanto reports it supports.

    Raises:
        HTTPException: 500 on any unexpected failure.
    """
    try:
        # A provider counts as configured only when its env var(s) hold a
        # non-empty value. The previous `is not None` check treated an env
        # var exported as an empty string (KEY="") as configured.
        def _has(*names: str) -> bool:
            return all(bool(os.environ.get(name)) for name in names)

        provider_status = {
            "ollama": _has("OLLAMA_API_BASE"),
            "openai": _has("OPENAI_API_KEY"),
            "groq": _has("GROQ_API_KEY"),
            "xai": _has("XAI_API_KEY"),
            # Vertex needs project, location AND application credentials.
            "vertex": _has(
                "VERTEX_PROJECT",
                "VERTEX_LOCATION",
                "GOOGLE_APPLICATION_CREDENTIALS",
            ),
            # Google accepts either of two key names.
            "google": _has("GOOGLE_API_KEY") or _has("GEMINI_API_KEY"),
            "openrouter": _has("OPENROUTER_API_KEY"),
            "anthropic": _has("ANTHROPIC_API_KEY"),
            "elevenlabs": _has("ELEVENLABS_API_KEY"),
            "voyage": _has("VOYAGE_API_KEY"),
            # Azure OpenAI requires all four settings.
            "azure": _has(
                "AZURE_OPENAI_API_KEY",
                "AZURE_OPENAI_ENDPOINT",
                "AZURE_OPENAI_DEPLOYMENT_NAME",
                "AZURE_OPENAI_API_VERSION",
            ),
            "mistral": _has("MISTRAL_API_KEY"),
            "deepseek": _has("DEEPSEEK_API_KEY"),
            "openai-compatible": _has("OPENAI_COMPATIBLE_BASE_URL"),
        }

        available_providers = [k for k, v in provider_status.items() if v]
        unavailable_providers = [k for k, v in provider_status.items() if not v]

        # Esperanto maps model type -> list of providers supporting it.
        esperanto_available = AIFactory.get_available_providers()

        # Invert that mapping, restricted to configured providers:
        # provider -> list of supported model types.
        supported_types: dict[str, list[str]] = {
            provider: [
                model_type
                for model_type, providers in esperanto_available.items()
                if provider in providers
            ]
            for provider in available_providers
        }

        return ProviderAvailabilityResponse(
            available=available_providers,
            unavailable=unavailable_providers,
            supported_types=supported_types,
        )
    except Exception as e:
        logger.error(f"Error checking provider availability: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Error checking provider availability: {str(e)}")
|
||||
Loading…
Add table
Add a link
Reference in a new issue