fix: Address review feedback from MODSetter

- Remove .env file (should not be committed to version control)
- Revert docker-compose.override.yml to use ghcr images instead of local builds
- Remove unnecessary DOCLING config section from __init__.py
- Remove model context window additions (litellm already provides this)
- Keep litellm<1.70.0 constraint due to Ollama vector_stores 404 errors

Addresses all feedback from PR #211 review
Abdullah 3li 2025-07-21 09:54:39 +03:00
parent aa00822169
commit fe2317ae92
5 changed files with 3 additions and 46 deletions

.env

@@ -1,17 +0,0 @@
-# Frontend Configuration
-FRONTEND_PORT=3000
-NEXT_PUBLIC_API_URL=http://backend:8000
-
-# Backend Configuration
-BACKEND_PORT=8000
-
-# Database Configuration
-POSTGRES_USER=postgres
-POSTGRES_PASSWORD=postgres
-POSTGRES_DB=surfsense
-POSTGRES_PORT=5432
-
-# pgAdmin Configuration
-PGADMIN_PORT=5050
-PGADMIN_DEFAULT_EMAIL=admin@surfsense.com
-PGADMIN_DEFAULT_PASSWORD=surfsense

.gitignore

@@ -2,3 +2,4 @@
 podcasts/
 reports/
 SURFSENSE_CRITICAL_FIXES_REPORT.md
+.env
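
Note that because .env was previously tracked, adding it to .gitignore alone does not untrack it; this commit also deletes it from the index. Anyone with an existing checkout who wants to keep a local copy can untrack it manually (an illustrative follow-up, not part of this commit):

    # keep the local file but remove it from version control
    git rm --cached .env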

docker-compose.override.yml

@@ -2,7 +2,7 @@ version: '3.8'
 services:
   frontend:
-    build: ./surfsense_web
+    image: ghcr.io/modsetter/surfsense_ui:latest
     ports:
       - "${FRONTEND_PORT:-3000}:3000"
     volumes:
@@ -14,7 +14,7 @@ services:
       - NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL:-http://backend:8000}
   backend:
-    build: ./surfsense_backend
+    image: ghcr.io/modsetter/surfsense_backend:latest
     ports:
       - "${BACKEND_PORT:-8000}:8000"
     volumes:
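
Compose merges every -f file in order, so contributors who still need local builds can layer an extra, uncommitted override on top of the ghcr images rather than editing the committed one. A minimal sketch, assuming a hypothetical docker-compose.local.yml:

    # docker-compose.local.yml (hypothetical, kept out of version control)
    services:
      frontend:
        build: ./surfsense_web
      backend:
        build: ./surfsense_backend

Run it with: docker compose -f docker-compose.yml -f docker-compose.override.yml -f docker-compose.local.yml up --build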


@@ -157,29 +157,6 @@ def find_optimal_documents_with_binary_search(
 def get_model_context_window(model_name: str) -> int:
     """Get the total context window size for a model (input + output tokens)."""
-    # Known context windows for common models
-    model_contexts = {
-        'llama3.1:8b': 128000,    # Llama 3.1 8B has 128K context
-        'llama3.1:70b': 128000,   # Llama 3.1 70B has 128K context
-        'llama3.1:405b': 128000,  # Llama 3.1 405B has 128K context
-        'llama3:8b': 8192,        # Llama 3 8B has 8K context
-        'llama3:70b': 8192,       # Llama 3 70B has 8K context
-        'ollama/llama3.1:8b': 128000,
-        'ollama/llama3.1:70b': 128000,
-        'ollama/llama3:8b': 8192,
-        'ollama/llama3:70b': 8192,
-    }
-    # Check for exact match first
-    if model_name in model_contexts:
-        return model_contexts[model_name]
-    # Check for partial matches (e.g., model_name contains 'llama3.1')
-    for model_key, context_size in model_contexts.items():
-        if model_key in model_name.lower():
-            return context_size
     try:
         model_info = get_model_info(model_name)
         context_window = model_info.get(
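
As the commit message notes, litellm's own model map already supplies these numbers, which is why the hardcoded table above could be dropped. A minimal sketch of the equivalent lookup (the fallback value is illustrative, not from the repo):

    from litellm import get_model_info

    def context_window(model_name: str, fallback: int = 8192) -> int:
        try:
            info = get_model_info(model_name)
            # litellm reports the model's input context size here
            return info.get("max_input_tokens") or fallback
        except Exception:
            # unmapped models raise; fall back to a conservative default
            return fallback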

__init__.py

@@ -84,10 +84,6 @@ class Config:
         # LlamaCloud API Key
         LLAMA_CLOUD_API_KEY = os.getenv("LLAMA_CLOUD_API_KEY")
-    elif ETL_SERVICE == "DOCLING":
-        # Docling doesn't require API keys - uses local processing
-        pass
 
     # Firecrawl API Key
     FIRECRAWL_API_KEY = os.getenv("FIRECRAWL_API_KEY", None)