diff --git a/.env b/.env
deleted file mode 100644
index cc6ebe3..0000000
--- a/.env
+++ /dev/null
@@ -1,17 +0,0 @@
-# Frontend Configuration
-FRONTEND_PORT=3000
-NEXT_PUBLIC_API_URL=http://backend:8000
-
-# Backend Configuration
-BACKEND_PORT=8000
-
-# Database Configuration
-POSTGRES_USER=postgres
-POSTGRES_PASSWORD=postgres
-POSTGRES_DB=surfsense
-POSTGRES_PORT=5432
-
-# pgAdmin Configuration
-PGADMIN_PORT=5050
-PGADMIN_DEFAULT_EMAIL=admin@surfsense.com
-PGADMIN_DEFAULT_PASSWORD=surfsense
diff --git a/.gitignore b/.gitignore
index dd3ea34..216e97b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,4 @@
 podcasts/
 reports/
 SURFSENSE_CRITICAL_FIXES_REPORT.md
+.env
diff --git a/docker-compose.override.yml b/docker-compose.override.yml
index 642b20f..3b4b6eb 100644
--- a/docker-compose.override.yml
+++ b/docker-compose.override.yml
@@ -2,7 +2,7 @@
 version: '3.8'
 services:
   frontend:
-    build: ./surfsense_web
+    build: ghcr.io/modsetter/surfsense_ui:latest
     ports:
       - "${FRONTEND_PORT:-3000}:3000"
     volumes:
@@ -14,7 +14,7 @@
       - NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL:-http://backend:8000}
 
   backend:
-    build: ./surfsense_backend
+    build: ghcr.io/modsetter/surfsense_backend:latest
    ports:
       - "${BACKEND_PORT:-8000}:8000"
     volumes:
diff --git a/surfsense_backend/app/agents/researcher/utils.py b/surfsense_backend/app/agents/researcher/utils.py
index f8ec8cd..c4991cc 100644
--- a/surfsense_backend/app/agents/researcher/utils.py
+++ b/surfsense_backend/app/agents/researcher/utils.py
@@ -157,29 +157,6 @@ def find_optimal_documents_with_binary_search(
 
 def get_model_context_window(model_name: str) -> int:
     """Get the total context window size for a model (input + output tokens)."""
-
-    # Known context windows for common models
-    model_contexts = {
-        'llama3.1:8b': 128000,    # Llama 3.1 8B has 128K context
-        'llama3.1:70b': 128000,   # Llama 3.1 70B has 128K context
-        'llama3.1:405b': 128000,  # Llama 3.1 405B has 128K context
-        'llama3:8b': 8192,        # Llama 3 8B has 8K context
-        'llama3:70b': 8192,       # Llama 3 70B has 8K context
-        'ollama/llama3.1:8b': 128000,
-        'ollama/llama3.1:70b': 128000,
-        'ollama/llama3:8b': 8192,
-        'ollama/llama3:70b': 8192,
-    }
-
-    # Check for exact match first
-    if model_name in model_contexts:
-        return model_contexts[model_name]
-
-    # Check for partial matches (e.g., model_name contains 'llama3.1')
-    for model_key, context_size in model_contexts.items():
-        if model_key in model_name.lower():
-            return context_size
-
     try:
         model_info = get_model_info(model_name)
         context_window = model_info.get(
diff --git a/surfsense_backend/app/config/__init__.py b/surfsense_backend/app/config/__init__.py
index 879ecaa..06c89ae 100644
--- a/surfsense_backend/app/config/__init__.py
+++ b/surfsense_backend/app/config/__init__.py
@@ -84,10 +84,6 @@ class Config:
         # LlamaCloud API Key
         LLAMA_CLOUD_API_KEY = os.getenv("LLAMA_CLOUD_API_KEY")
 
-    elif ETL_SERVICE == "DOCLING":
-        # Docling doesn't require API keys - uses local processing
-        pass
-
     # Firecrawl API Key
     FIRECRAWL_API_KEY = os.getenv("FIRECRAWL_API_KEY", None)
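Note on the utils.py change: with the hard-coded Llama context table removed, get_model_context_window now depends entirely on the model registry behind get_model_info (litellm's, judging by the call shown in the retained context lines). The sketch below only illustrates that lookup-with-fallback pattern and is not the repository's actual implementation; the function name lookup_context_window and the 4096-token default are assumptions.

    import litellm

    # Assumed conservative fallback; not taken from the repository.
    DEFAULT_CONTEXT_WINDOW = 4096

    def lookup_context_window(model_name: str) -> int:
        """Illustrative only: resolve a model's context window via litellm's registry."""
        try:
            info = litellm.get_model_info(model_name)
            # litellm exposes max_input_tokens / max_tokens for models it knows about.
            return (
                info.get("max_input_tokens")
                or info.get("max_tokens")
                or DEFAULT_CONTEXT_WINDOW
            )
        except Exception:
            # Models missing from litellm's registry (e.g. custom Ollama tags) raise here,
            # so fall back to the conservative default instead of failing the request.
            return DEFAULT_CONTEXT_WINDOW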