mirror of
https://github.com/lfnovo/open-notebook.git
synced 2026-05-03 13:50:31 +00:00
* feat(i18n): complete 100% internationalization and fix Next.js 15 compatibility
* feat(i18n): complete 100% internationalization coverage
* chore(test): finalize component tests and project cleanup
* test(logic): add unit tests for useModalManager hook
* fix(test): resolve timeout in AppSidebar tests by mocking TooltipProvider
* feat(i18n): comprehensive i18n audit, fixes for hardcoded strings, and complete zh-TW support
* fix(i18n): resolve TypeScript warnings and improve translation hook stability
- Remove unused useTranslation import from ConnectionGuard
- Add ref-based checking state to prevent dependency cycles
- Fix useTranslation hook to return empty string for undefined translations
- Add comment for backward compatibility on ExtractedReference interface
- Ensure .replace() string methods work safely with nested translation keys
* feat(i18n): complete internationalization implementation with Docker deployment
- Add LanguageLoadingOverlay component for smooth language transitions
- Update all translation files (en-US, zh-CN, zh-TW) with improved terminology
- Optimize Docker configuration for better performance
- Update version check and config handling for i18n support
- Fix route handling for language-specific content
- Add comprehensive task documentation
* fix(i18n): resolve localization errors, duplicates, and type issues
* chore(i18n): finalize 100% internationalization coverage
* chore(test): supplement i18n test cases and cleanup redundant files
* fix(test): resolve lint type errors and finalize delivery documents
* feat(i18n): finalize full internationalization and zh-TW localization
* fix(frontend): add missing devDependency and fix build tsconfig
* feat(ui): enhance sidebar hover effects with better visual feedback
* fix(frontend): resolve accessibility, i18n, and lint issues
- fix: add missing id, name, autocomplete attributes to dialog inputs
- fix: add aria labels and DialogDescription for accessibility
- fix: resolve uncontrolled component warning in SettingsForm
- fix: correct duplicate 'Traditional Chinese' label in zh-TW locale
- feat: add i18n support for podcast template names
- chore: fix lint errors in Dialogs
* fix: address all 21 PR feedback items from cubic-dev-ai bot
Configuration:
- Remove ignoreDuringBuilds flags from next.config.ts
Testing:
- Fix AppSidebar.test.tsx regex pattern and add missing assertion
Logic:
- Fix ConnectionGuard.tsx re-entry prevention logic
Internationalization (I18n) - Translations:
- Add missing keys: notebooks.archived, common.note/insight, accessibility keys
- Add specific keys: sources.allSourcesDescShort, transformations.selectModel
- Add singular/plural keys: podcasts.usedByCount_one/other, common.note/notes
- Add common.created/updated with {time} placeholder
Internationalization (I18n) - Usage:
- SourcesPage: use allSourcesDescShort instead of string splitting
- TransformationPlayground: use navigation.transformation and selectModel
- CommandPalette: use dedicated keys instead of string concatenation
- GeneratePodcastDialog: fix zh-TW date locale handling
- NotebookHeader: correctly interpolate {time} placeholder
- TransformationCard: use common.description instead of undefined key
- ChatPanel/SpeakerProfilesPanel: implement proper pluralization
- SystemInfo: correctly interpolate {version} placeholder
- LanguageLoadingOverlay: use t.common.loading instead of hardcoded string
- MessageActions: use specific error key cannotSaveNoteNoNotebook
Other:
- Fix SessionManager.tsx exhaustive-deps warning
* fix: remove duplicate locale keys and add missing zh-CN translations
- en-US: remove duplicate loading key (line 59) and addNew key (sources)
- zh-CN: remove duplicate common keys (loading, note, insight, newSource, newNotebook, newPodcast)
- zh-CN: remove duplicate accessibility.searchNotebooks key
- zh-CN: remove duplicate sources.addNew key
- zh-CN: remove duplicate navigation.transformation key
- zh-CN: add missing usedByCount_one and usedByCount_other keys in podcasts
- zh-TW: remove duplicate common keys (loading, note, insight, newSource, newNotebook, newPodcast)
- zh-TW: remove duplicate accessibility.searchNotebooks key
- zh-TW: remove duplicate sources.addNew key
* docs: remove info.md
* fix: remove duplicate notebook keys and unused ts-expect-error
- zh-CN: remove duplicate notebooks keys (archived, archive, unarchive, deleteNotebook, deleteNotebookDesc)
- zh-TW: remove duplicate notebooks keys (archived, archive, unarchive, deleteNotebook, deleteNotebookDesc)
- GeneratePodcastDialog: remove unused @ts-expect-error directive
* fix(a11y): fix unassociated labels in search page
- Replace <Label> with role='group' + aria-labelledby for search type section
- Replace <Label> with role='group' + aria-labelledby for search in section
- Follows WAI-ARIA best practices for labeling form field groups
* fix(a11y): fix unassociated labels across multiple components
- search/page.tsx: use role='group' + aria-labelledby for search type and search in sections
- RebuildEmbeddings.tsx: use role='group' + aria-labelledby for include checkboxes
- TransformationPlayground.tsx: replace Label with span for non-form output label
* chore: revert to npm stack and ensure i18n compatibility
* chore: polish zh-TW translations for better idiomatic usage
* fix: resolve linter errors (ruff import sort, mypy config duplicate)
* style: apply ruff formatting
* fix: finalize upstream compliance (Dockerfile.single, i18n hooks, docker-compose)
* style: polish strings, fix timeout cleanup, and improve test mocks
* fix: use relative imports in test setup to resolve IDE path errors
* perf(docker): optimize build speed by removing apt-get upgrade and build tools
- Remove apt-get upgrade from both builder and runtime stages (saves 10-15 min each)
- Remove gcc/g++/make/git from builder (uv downloads pre-built wheels)
- Add --no-install-recommends to minimize package footprint
- Keep npm mirror (npmmirror.com) for faster frontend deps
- Add npm registry config for reliable China network access
Also includes:
- fix(a11y): add missing labels and aria attributes to form fields
- fix(i18n): add 2s safety timeout to LanguageLoadingOverlay
- fix(i18n): add robustness checks to use-translation proxy
Build time reduced from 2+ hours to ~34 minutes (~70% improvement)
* fix(a11y): resolve 16 form field accessibility warnings in notebook and podcast pages
* fix(a11y): resolve 4 button and 1 select field accessibility warnings in models page
* fix(a11y): resolve redundant attributes and residual warnings in transformations and podcast forms
* fix(i18n): deep fix for language switch hang using proxy protection and safer access
* fix(a11y): add name attributes to ModelSelector, TransformationPlayground, and SourceDetailContent
* fix: add missing Label import to SourceDetailContent
* fix(i18n): use native react-i18next in LanguageLoadingOverlay to prevent hang during language switch
* fix(i18n): rewrite use-translation Proxy with strict depth limit and expanded blocked props to prevent language switch hang
* fix: add type assertion to fix TypeScript comparison error
* fix(i18n): disable useSuspense to prevent thread hang during language resource loading
* fix(i18n): add infinite loop detection circuit breaker to useTranslation hook
* fix(i18n): update traditional chinese label to native script in en-US
* feat: add new localization strings for notebook and note management.
* fix: resolve config priority, docker build deps, and ui glitches
* refactor: improve ui details and test coverage based on feedback
* refactor: improve ui details (version check/lang toggle) and test coverage
* fix: polish language matching and test cleanup
* fix(test): update mocks to resolve timeouts and proxy errors
* fix(frontend): restore tsconfig.json structure and enable IDE support for tests
* fix: address PR review findings and resolve CI OIDC failure
* fix: merge exception headers in custom handler
* fix: comprehensive PR review remediations and async performance fixes
* refactor: address all PR #371 review feedback
- Docker: consolidate SURREAL_URL to docker.env, add single-container override
- Security: restore apt-get upgrade in Dockerfile and Dockerfile.single
- Create centralized getDateLocale helper (lib/utils/date-locale.ts)
- Refactor 7 files to use getDateLocale helper
- Revert config/route.ts to origin/main version
- Move test files to co-located pattern (3 files)
- Remove local useTranslation mock from ConfirmDialog.test.tsx
- Simplify use-version-check to single useEffect pattern
- Fix test import paths after moving to co-located pattern
* fix: add jest-dom types for test files
* fix: address remaining review issues
- Add apt-get upgrade -y to Dockerfile.single backend-builder stage
- Refactor ChatColumn.test.tsx: use 'as unknown as ReturnType<typeof hook>' instead of 'as any'
- Use toBeInTheDocument() assertions instead of toBeDefined()
323 lines
13 KiB
Python
323 lines
13 KiB
Python
import os
|
|
from typing import List, Optional
|
|
|
|
from esperanto import AIFactory
|
|
from fastapi import APIRouter, HTTPException, Query
|
|
from loguru import logger
|
|
|
|
from api.models import (
|
|
DefaultModelsResponse,
|
|
ModelCreate,
|
|
ModelResponse,
|
|
ProviderAvailabilityResponse,
|
|
)
|
|
from open_notebook.ai.models import DefaultModels, Model
|
|
from open_notebook.exceptions import InvalidInputError
|
|
|
|
# Shared FastAPI router collecting all model-management endpoints in this module.
router = APIRouter()
|
|
|
|
|
|
def _check_openai_compatible_support(mode: str) -> bool:
    """
    Check if OpenAI-compatible provider is available for a specific mode.

    Args:
        mode: One of 'LLM', 'EMBEDDING', 'STT', 'TTS'

    Returns:
        bool: True if either generic or mode-specific env var is set
    """
    # Membership test on os.environ is equivalent to `.get(...) is not None`.
    env = os.environ
    return (
        "OPENAI_COMPATIBLE_BASE_URL" in env
        or f"OPENAI_COMPATIBLE_BASE_URL_{mode}" in env
    )
|
|
|
|
|
|
def _check_azure_support(mode: str) -> bool:
    """
    Check if Azure OpenAI provider is available for a specific mode.

    Args:
        mode: One of 'LLM', 'EMBEDDING', 'STT', 'TTS'

    Returns:
        bool: True if either generic or mode-specific env vars are set
    """
    # Azure needs all three of key + endpoint + API version to be usable.
    required = (
        "AZURE_OPENAI_API_KEY",
        "AZURE_OPENAI_ENDPOINT",
        "AZURE_OPENAI_API_VERSION",
    )
    # Generic configuration applies to every mode.
    has_generic = all(name in os.environ for name in required)
    # Mode-specific configuration (e.g. AZURE_OPENAI_API_KEY_LLM).
    has_specific = all(f"{name}_{mode}" in os.environ for name in required)
    return has_generic or has_specific
|
|
|
|
|
|
@router.get("/models", response_model=List[ModelResponse])
async def get_models(
    type: Optional[str] = Query(None, description="Filter by model type"),
):
    """Get all configured models with optional type filtering."""
    try:
        # Either a type-filtered fetch or the full list, depending on the query.
        if type:
            records = await Model.get_models_by_type(type)
        else:
            records = await Model.get_all()

        return [
            ModelResponse(
                id=record.id,
                name=record.name,
                provider=record.provider,
                type=record.type,
                created=str(record.created),
                updated=str(record.updated),
            )
            for record in records
        ]
    except Exception as e:
        logger.error(f"Error fetching models: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Error fetching models: {str(e)}")
|
|
|
|
|
|
@router.post("/models", response_model=ModelResponse)
async def create_model(model_data: ModelCreate):
    """Create a new model configuration."""
    try:
        # Reject unknown model types up front.
        valid_types = ["language", "embedding", "text_to_speech", "speech_to_text"]
        if model_data.type not in valid_types:
            raise HTTPException(
                status_code=400,
                detail=f"Invalid model type. Must be one of: {valid_types}",
            )

        # Duplicate guard: same provider + name + type, compared case-insensitively.
        from open_notebook.database.repository import repo_query

        duplicate = await repo_query(
            "SELECT * FROM model WHERE string::lowercase(provider) = $provider AND string::lowercase(name) = $name AND string::lowercase(type) = $type LIMIT 1",
            {
                "provider": model_data.provider.lower(),
                "name": model_data.name.lower(),
                "type": model_data.type.lower(),
            },
        )
        if duplicate:
            raise HTTPException(
                status_code=400,
                detail=f"Model '{model_data.name}' already exists for provider '{model_data.provider}' with type '{model_data.type}'",
            )

        created_model = Model(
            name=model_data.name,
            provider=model_data.provider,
            type=model_data.type,
        )
        await created_model.save()

        return ModelResponse(
            id=created_model.id or "",
            name=created_model.name,
            provider=created_model.provider,
            type=created_model.type,
            created=str(created_model.created),
            updated=str(created_model.updated),
        )
    except HTTPException:
        # Re-raise our own 4xx responses untouched.
        raise
    except InvalidInputError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"Error creating model: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Error creating model: {str(e)}")
|
|
|
|
|
|
@router.delete("/models/{model_id}")
async def delete_model(model_id: str):
    """Delete a model configuration."""
    try:
        # 404 when the id does not resolve to a stored model.
        target = await Model.get(model_id)
        if not target:
            raise HTTPException(status_code=404, detail="Model not found")

        await target.delete()
        return {"message": "Model deleted successfully"}
    except HTTPException:
        # Preserve the 404 above rather than wrapping it in a 500.
        raise
    except Exception as e:
        logger.error(f"Error deleting model {model_id}: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Error deleting model: {str(e)}")
|
|
|
|
|
|
@router.get("/models/defaults", response_model=DefaultModelsResponse)
async def get_default_models():
    """Get default model assignments."""
    try:
        defaults = await DefaultModels.get_instance()

        # Mirror every default-model slot straight onto the response object.
        # getattr avoids the per-attribute `type: ignore[attr-defined]` noise.
        field_names = (
            "default_chat_model",
            "default_transformation_model",
            "large_context_model",
            "default_text_to_speech_model",
            "default_speech_to_text_model",
            "default_embedding_model",
            "default_tools_model",
        )
        return DefaultModelsResponse(
            **{name: getattr(defaults, name) for name in field_names}
        )
    except Exception as e:
        logger.error(f"Error fetching default models: {str(e)}")
        raise HTTPException(
            status_code=500, detail=f"Error fetching default models: {str(e)}"
        )
|
|
|
|
|
|
@router.put("/models/defaults", response_model=DefaultModelsResponse)
async def update_default_models(defaults_data: DefaultModelsResponse):
    """Update default model assignments.

    Only fields supplied with a non-None value are applied; omitted fields
    keep their currently stored value. The full set of defaults is returned
    after the update is persisted.

    Raises:
        HTTPException: 500 if loading or persisting the defaults fails.
    """
    # Every default-model slot this endpoint manages. Driving both the
    # update and the response from this one tuple replaces seven
    # copy-pasted `if ... is not None: assign` blocks that would otherwise
    # have to be kept in sync by hand when a new slot is added.
    field_names = (
        "default_chat_model",
        "default_transformation_model",
        "large_context_model",
        "default_text_to_speech_model",
        "default_speech_to_text_model",
        "default_embedding_model",
        "default_tools_model",
    )
    try:
        defaults = await DefaultModels.get_instance()

        # Apply only the fields the caller actually provided (non-None),
        # matching the original partial-update semantics.
        for name in field_names:
            value = getattr(defaults_data, name)
            if value is not None:
                setattr(defaults, name, value)

        await defaults.update()

        # No cache refresh needed - next access will fetch fresh data from DB
        return DefaultModelsResponse(
            **{name: getattr(defaults, name) for name in field_names}
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error updating default models: {str(e)}")
        raise HTTPException(
            status_code=500, detail=f"Error updating default models: {str(e)}"
        )
|
|
|
|
|
|
@router.get("/models/providers", response_model=ProviderAvailabilityResponse)
async def get_provider_availability():
    """Get provider availability based on environment variables.

    A provider is "available" when its required credentials/endpoint env
    vars are present. For each available provider the response also lists
    the Esperanto model types it supports; `azure` and `openai-compatible`
    are additionally gated per mode (LLM/EMBEDDING/STT/TTS) because their
    env vars can be configured per mode.

    Raises:
        HTTPException: 500 if availability detection fails.
    """
    try:
        # Check which providers have API keys configured
        provider_status = {
            "ollama": os.environ.get("OLLAMA_API_BASE") is not None,
            "openai": os.environ.get("OPENAI_API_KEY") is not None,
            "groq": os.environ.get("GROQ_API_KEY") is not None,
            "xai": os.environ.get("XAI_API_KEY") is not None,
            "vertex": (
                os.environ.get("VERTEX_PROJECT") is not None
                and os.environ.get("VERTEX_LOCATION") is not None
                and os.environ.get("GOOGLE_APPLICATION_CREDENTIALS") is not None
            ),
            "google": (
                os.environ.get("GOOGLE_API_KEY") is not None
                or os.environ.get("GEMINI_API_KEY") is not None
            ),
            "openrouter": os.environ.get("OPENROUTER_API_KEY") is not None,
            "anthropic": os.environ.get("ANTHROPIC_API_KEY") is not None,
            "elevenlabs": os.environ.get("ELEVENLABS_API_KEY") is not None,
            "voyage": os.environ.get("VOYAGE_API_KEY") is not None,
            "azure": (
                _check_azure_support("LLM")
                or _check_azure_support("EMBEDDING")
                or _check_azure_support("STT")
                or _check_azure_support("TTS")
            ),
            "mistral": os.environ.get("MISTRAL_API_KEY") is not None,
            "deepseek": os.environ.get("DEEPSEEK_API_KEY") is not None,
            "openai-compatible": (
                _check_openai_compatible_support("LLM")
                or _check_openai_compatible_support("EMBEDDING")
                or _check_openai_compatible_support("STT")
                or _check_openai_compatible_support("TTS")
            ),
        }

        available_providers = [k for k, v in provider_status.items() if v]
        unavailable_providers = [k for k, v in provider_status.items() if not v]

        # Get supported model types from Esperanto
        esperanto_available = AIFactory.get_available_providers()

        # Esperanto model type -> env-var mode suffix. Hoisted out of the
        # provider loop (it was previously rebuilt on every iteration).
        mode_mapping = {
            "language": "LLM",
            "embedding": "EMBEDDING",
            "speech_to_text": "STT",
            "text_to_speech": "TTS",
        }
        # Providers whose per-mode availability must also be confirmed
        # against mode-specific environment variables. This dispatch table
        # replaces two near-identical if/elif branches that differed only
        # in which checker they called.
        mode_checkers = {
            "openai-compatible": _check_openai_compatible_support,
            "azure": _check_azure_support,
        }

        # Build supported types mapping only for available providers
        supported_types: dict[str, list[str]] = {}
        for provider in available_providers:
            checker = mode_checkers.get(provider)
            if checker is not None:
                # Mode-gated provider: Esperanto must list it for the type
                # AND the matching mode env vars must be present.
                supported_types[provider] = [
                    model_type
                    for model_type, mode in mode_mapping.items()
                    if provider in esperanto_available.get(model_type, ())
                    and checker(mode)
                ]
            else:
                # Standard provider detection: whatever Esperanto reports.
                supported_types[provider] = [
                    model_type
                    for model_type, providers in esperanto_available.items()
                    if provider in providers
                ]

        return ProviderAvailabilityResponse(
            available=available_providers,
            unavailable=unavailable_providers,
            supported_types=supported_types,
        )
    except Exception as e:
        logger.error(f"Error checking provider availability: {str(e)}")
        raise HTTPException(
            status_code=500, detail=f"Error checking provider availability: {str(e)}"
        )
|