Merge remote-tracking branch 'upstream/dev' into impr/thinking-steps

Anish Sarkar 2026-03-25 01:50:10 +05:30
commit 778cfac6fa
96 changed files with 4065 additions and 3274 deletions

View file

@ -57,7 +57,7 @@ jobs:
working-directory: surfsense_web
env:
NEXT_PUBLIC_FASTAPI_BACKEND_URL: ${{ vars.NEXT_PUBLIC_FASTAPI_BACKEND_URL }}
NEXT_PUBLIC_ELECTRIC_URL: ${{ vars.NEXT_PUBLIC_ELECTRIC_URL }}
NEXT_PUBLIC_ZERO_CACHE_URL: ${{ vars.NEXT_PUBLIC_ZERO_CACHE_URL }}
NEXT_PUBLIC_DEPLOYMENT_MODE: ${{ vars.NEXT_PUBLIC_DEPLOYMENT_MODE }}
NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE: ${{ vars.NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE }}

View file

@ -164,8 +164,7 @@ jobs:
${{ matrix.image == 'web' && 'NEXT_PUBLIC_FASTAPI_BACKEND_URL=__NEXT_PUBLIC_FASTAPI_BACKEND_URL__' || '' }}
${{ matrix.image == 'web' && 'NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=__NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE__' || '' }}
${{ matrix.image == 'web' && 'NEXT_PUBLIC_ETL_SERVICE=__NEXT_PUBLIC_ETL_SERVICE__' || '' }}
${{ matrix.image == 'web' && 'NEXT_PUBLIC_ELECTRIC_URL=__NEXT_PUBLIC_ELECTRIC_URL__' || '' }}
${{ matrix.image == 'web' && 'NEXT_PUBLIC_ELECTRIC_AUTH_MODE=__NEXT_PUBLIC_ELECTRIC_AUTH_MODE__' || '' }}
${{ matrix.image == 'web' && 'NEXT_PUBLIC_ZERO_CACHE_URL=__NEXT_PUBLIC_ZERO_CACHE_URL__' || '' }}
${{ matrix.image == 'web' && 'NEXT_PUBLIC_DEPLOYMENT_MODE=__NEXT_PUBLIC_DEPLOYMENT_MODE__' || '' }}
- name: Export digest

View file

@ -35,7 +35,7 @@ EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# BACKEND_PORT=8929
# FRONTEND_PORT=3929
# ELECTRIC_PORT=5929
# ZERO_CACHE_PORT=5929
# SEARXNG_PORT=8888
# FLOWER_PORT=5555
@ -58,7 +58,6 @@ EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=LOCAL
# NEXT_PUBLIC_ETL_SERVICE=DOCLING
# NEXT_PUBLIC_DEPLOYMENT_MODE=self-hosted
# NEXT_PUBLIC_ELECTRIC_AUTH_MODE=insecure
# ------------------------------------------------------------------------------
# Custom Domain / Reverse Proxy
@ -71,8 +70,35 @@ EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# NEXT_FRONTEND_URL=https://app.yourdomain.com
# BACKEND_URL=https://api.yourdomain.com
# NEXT_PUBLIC_FASTAPI_BACKEND_URL=https://api.yourdomain.com
# NEXT_PUBLIC_ELECTRIC_URL=https://electric.yourdomain.com
# NEXT_PUBLIC_ZERO_CACHE_URL=https://zero.yourdomain.com
# ------------------------------------------------------------------------------
# Zero-cache (real-time sync)
# ------------------------------------------------------------------------------
# Defaults work out of the box for Docker deployments.
# Change ZERO_ADMIN_PASSWORD for security in production.
# ZERO_ADMIN_PASSWORD=surfsense-zero-admin
# Full override for the Zero → Postgres connection URLs.
# Leave commented out to use the Docker-managed `db` container (default).
# ZERO_UPSTREAM_DB=postgresql://surfsense:surfsense@db:5432/surfsense
# ZERO_CVR_DB=postgresql://surfsense:surfsense@db:5432/surfsense
# ZERO_CHANGE_DB=postgresql://surfsense:surfsense@db:5432/surfsense
# ZERO_QUERY_URL: where zero-cache forwards query requests for resolution.
# ZERO_MUTATE_URL: required by zero-cache when auth tokens are used, even though
# SurfSense does not use Zero mutators. Setting both URLs tells zero-cache to
# skip its own JWT verification and let the app endpoints handle auth instead.
# The mutate endpoint is a no-op that returns an empty response.
# Default: Docker service networking (http://frontend:3000/api/zero/...).
# Override when running the frontend outside Docker:
# ZERO_QUERY_URL=http://host.docker.internal:3000/api/zero/query
# ZERO_MUTATE_URL=http://host.docker.internal:3000/api/zero/mutate
# Override for custom domain:
# ZERO_QUERY_URL=https://app.yourdomain.com/api/zero/query
# ZERO_MUTATE_URL=https://app.yourdomain.com/api/zero/mutate
# ZERO_QUERY_URL=http://frontend:3000/api/zero/query
# ZERO_MUTATE_URL=http://frontend:3000/api/zero/mutate
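# For orientation, this is roughly how the frontend ends up consuming these
# settings: the browser connects to zero-cache at NEXT_PUBLIC_ZERO_CACHE_URL
# with the user's bearer token, and zero-cache forwards that token to
# ZERO_QUERY_URL for verification. A minimal sketch, assuming the
# @rocicorp/zero client constructor; the userId/token plumbing below is
# illustrative, not SurfSense's actual wiring:
import { Zero } from "@rocicorp/zero";
import { schema } from "@/zero/schema";
function createZeroClient(userId: string, token: string) {
return new Zero({
userID: userId,
auth: token, // zero-cache passes this token through to ZERO_QUERY_URL
server: process.env.NEXT_PUBLIC_ZERO_CACHE_URL ?? "http://localhost:4848",
schema,
});
}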
# ------------------------------------------------------------------------------
# Database (defaults work out of the box, change for security)
@ -101,19 +127,6 @@ EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# Supports TLS: rediss://:password@host:6380/0
# REDIS_URL=redis://redis:6379/0
# ------------------------------------------------------------------------------
# Electric SQL (real-time sync credentials)
# ------------------------------------------------------------------------------
# These must match on the db, backend, and electric services.
# Change for security; defaults work out of the box.
# ELECTRIC_DB_USER=electric
# ELECTRIC_DB_PASSWORD=electric_password
# Full override for the Electric → Postgres connection URL.
# Leave commented out to use the Docker-managed `db` container (default).
# Uncomment and set `db` to `host.docker.internal` when pointing Electric at a local Postgres instance (e.g. Postgres.app on macOS):
# ELECTRIC_DATABASE_URL=postgresql://electric:electric_password@db:5432/surfsense?sslmode=disable
# ------------------------------------------------------------------------------
# TTS & STT (Text-to-Speech / Speech-to-Text)
# ------------------------------------------------------------------------------

View file

@ -18,13 +18,10 @@ services:
volumes:
- postgres_data:/var/lib/postgresql/data
- ./postgresql.conf:/etc/postgresql/postgresql.conf:ro
- ./scripts/init-electric-user.sh:/docker-entrypoint-initdb.d/init-electric-user.sh:ro
environment:
- POSTGRES_USER=${DB_USER:-postgres}
- POSTGRES_PASSWORD=${DB_PASSWORD:-postgres}
- POSTGRES_DB=${DB_NAME:-surfsense}
- ELECTRIC_DB_USER=${ELECTRIC_DB_USER:-electric}
- ELECTRIC_DB_PASSWORD=${ELECTRIC_DB_PASSWORD:-electric_password}
command: postgres -c config_file=/etc/postgresql/postgresql.conf
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${DB_USER:-postgres} -d ${DB_NAME:-surfsense}"]
@ -91,8 +88,6 @@ services:
- UNSTRUCTURED_HAS_PATCHED_LOOP=1
- LANGCHAIN_TRACING_V2=false
- LANGSMITH_TRACING=false
- ELECTRIC_DB_USER=${ELECTRIC_DB_USER:-electric}
- ELECTRIC_DB_PASSWORD=${ELECTRIC_DB_PASSWORD:-electric_password}
- AUTH_TYPE=${AUTH_TYPE:-LOCAL}
- NEXT_FRONTEND_URL=${NEXT_FRONTEND_URL:-http://localhost:3000}
- SEARXNG_DEFAULT_HOST=${SEARXNG_DEFAULT_HOST:-http://searxng:8080}
@ -130,8 +125,6 @@ services:
- REDIS_APP_URL=${REDIS_URL:-redis://redis:6379/0}
- CELERY_TASK_DEFAULT_QUEUE=surfsense
- PYTHONPATH=/app
- ELECTRIC_DB_USER=${ELECTRIC_DB_USER:-electric}
- ELECTRIC_DB_PASSWORD=${ELECTRIC_DB_PASSWORD:-electric_password}
- SEARXNG_DEFAULT_HOST=${SEARXNG_DEFAULT_HOST:-http://searxng:8080}
- SERVICE_ROLE=worker
depends_on:
@ -176,20 +169,28 @@ services:
# - redis
# - celery_worker
electric:
image: electricsql/electric:1.4.10
zero-cache:
image: rocicorp/zero:0.26.2
ports:
- "${ELECTRIC_PORT:-5133}:3000"
- "${ZERO_CACHE_PORT:-4848}:4848"
extra_hosts:
- "host.docker.internal:host-gateway"
depends_on:
db:
condition: service_healthy
environment:
- DATABASE_URL=${ELECTRIC_DATABASE_URL:-postgresql://${ELECTRIC_DB_USER:-electric}:${ELECTRIC_DB_PASSWORD:-electric_password}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}?sslmode=${DB_SSLMODE:-disable}}
- ELECTRIC_INSECURE=true
- ELECTRIC_WRITE_TO_PG_MODE=direct
- ZERO_UPSTREAM_DB=${ZERO_UPSTREAM_DB:-postgresql://${DB_USER:-postgres}:${DB_PASSWORD:-postgres}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}?sslmode=${DB_SSLMODE:-disable}}
- ZERO_CVR_DB=${ZERO_CVR_DB:-postgresql://${DB_USER:-postgres}:${DB_PASSWORD:-postgres}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}?sslmode=${DB_SSLMODE:-disable}}
- ZERO_CHANGE_DB=${ZERO_CHANGE_DB:-postgresql://${DB_USER:-postgres}:${DB_PASSWORD:-postgres}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}?sslmode=${DB_SSLMODE:-disable}}
- ZERO_REPLICA_FILE=/data/zero.db
- ZERO_ADMIN_PASSWORD=${ZERO_ADMIN_PASSWORD:-surfsense-zero-admin}
- ZERO_QUERY_URL=${ZERO_QUERY_URL:-http://frontend:3000/api/zero/query}
- ZERO_MUTATE_URL=${ZERO_MUTATE_URL:-http://frontend:3000/api/zero/mutate}
volumes:
- zero_cache_data:/data
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/v1/health"]
test: ["CMD", "curl", "-f", "http://localhost:4848/keepalive"]
interval: 10s
timeout: 5s
retries: 5
@ -201,8 +202,7 @@ services:
NEXT_PUBLIC_FASTAPI_BACKEND_URL: ${NEXT_PUBLIC_FASTAPI_BACKEND_URL:-http://localhost:8000}
NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE: ${NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE:-LOCAL}
NEXT_PUBLIC_ETL_SERVICE: ${NEXT_PUBLIC_ETL_SERVICE:-DOCLING}
NEXT_PUBLIC_ELECTRIC_URL: ${NEXT_PUBLIC_ELECTRIC_URL:-http://localhost:5133}
NEXT_PUBLIC_ELECTRIC_AUTH_MODE: ${NEXT_PUBLIC_ELECTRIC_AUTH_MODE:-insecure}
NEXT_PUBLIC_ZERO_CACHE_URL: ${NEXT_PUBLIC_ZERO_CACHE_URL:-http://localhost:${ZERO_CACHE_PORT:-4848}}
NEXT_PUBLIC_DEPLOYMENT_MODE: ${NEXT_PUBLIC_DEPLOYMENT_MODE:-self-hosted}
ports:
- "${FRONTEND_PORT:-3000}:3000"
@ -211,7 +211,7 @@ services:
depends_on:
backend:
condition: service_healthy
electric:
zero-cache:
condition: service_healthy
volumes:
@ -223,3 +223,5 @@ volumes:
name: surfsense-dev-redis
shared_temp:
name: surfsense-dev-shared-temp
zero_cache_data:
name: surfsense-dev-zero-cache

View file

@ -15,13 +15,10 @@ services:
volumes:
- postgres_data:/var/lib/postgresql/data
- ./postgresql.conf:/etc/postgresql/postgresql.conf:ro
- ./scripts/init-electric-user.sh:/docker-entrypoint-initdb.d/init-electric-user.sh:ro
environment:
POSTGRES_USER: ${DB_USER:-surfsense}
POSTGRES_PASSWORD: ${DB_PASSWORD:-surfsense}
POSTGRES_DB: ${DB_NAME:-surfsense}
ELECTRIC_DB_USER: ${ELECTRIC_DB_USER:-electric}
ELECTRIC_DB_PASSWORD: ${ELECTRIC_DB_PASSWORD:-electric_password}
command: postgres -c config_file=/etc/postgresql/postgresql.conf
restart: unless-stopped
healthcheck:
@ -72,8 +69,6 @@ services:
PYTHONPATH: /app
UVICORN_LOOP: asyncio
UNSTRUCTURED_HAS_PATCHED_LOOP: "1"
ELECTRIC_DB_USER: ${ELECTRIC_DB_USER:-electric}
ELECTRIC_DB_PASSWORD: ${ELECTRIC_DB_PASSWORD:-electric_password}
NEXT_FRONTEND_URL: ${NEXT_FRONTEND_URL:-http://localhost:${FRONTEND_PORT:-3929}}
SEARXNG_DEFAULT_HOST: ${SEARXNG_DEFAULT_HOST:-http://searxng:8080}
# Daytona Sandbox uncomment and set credentials to enable cloud code execution
@ -112,8 +107,6 @@ services:
REDIS_APP_URL: ${REDIS_URL:-redis://redis:6379/0}
CELERY_TASK_DEFAULT_QUEUE: surfsense
PYTHONPATH: /app
ELECTRIC_DB_USER: ${ELECTRIC_DB_USER:-electric}
ELECTRIC_DB_PASSWORD: ${ELECTRIC_DB_PASSWORD:-electric_password}
SEARXNG_DEFAULT_HOST: ${SEARXNG_DEFAULT_HOST:-http://searxng:8080}
SERVICE_ROLE: worker
depends_on:
@ -165,20 +158,28 @@ services:
# - celery_worker
# restart: unless-stopped
electric:
image: electricsql/electric:1.4.10
zero-cache:
image: rocicorp/zero:0.26.2
ports:
- "${ELECTRIC_PORT:-5929}:3000"
- "${ZERO_CACHE_PORT:-5929}:4848"
extra_hosts:
- "host.docker.internal:host-gateway"
environment:
DATABASE_URL: ${ELECTRIC_DATABASE_URL:-postgresql://${ELECTRIC_DB_USER:-electric}:${ELECTRIC_DB_PASSWORD:-electric_password}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}?sslmode=${DB_SSLMODE:-disable}}
ELECTRIC_INSECURE: "true"
ELECTRIC_WRITE_TO_PG_MODE: direct
ZERO_UPSTREAM_DB: ${ZERO_UPSTREAM_DB:-postgresql://${DB_USER:-surfsense}:${DB_PASSWORD:-surfsense}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}?sslmode=${DB_SSLMODE:-disable}}
ZERO_CVR_DB: ${ZERO_CVR_DB:-postgresql://${DB_USER:-surfsense}:${DB_PASSWORD:-surfsense}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}?sslmode=${DB_SSLMODE:-disable}}
ZERO_CHANGE_DB: ${ZERO_CHANGE_DB:-postgresql://${DB_USER:-surfsense}:${DB_PASSWORD:-surfsense}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}?sslmode=${DB_SSLMODE:-disable}}
ZERO_REPLICA_FILE: /data/zero.db
ZERO_ADMIN_PASSWORD: ${ZERO_ADMIN_PASSWORD:-surfsense-zero-admin}
ZERO_QUERY_URL: ${ZERO_QUERY_URL:-http://frontend:3000/api/zero/query}
ZERO_MUTATE_URL: ${ZERO_MUTATE_URL:-http://frontend:3000/api/zero/mutate}
volumes:
- zero_cache_data:/data
restart: unless-stopped
depends_on:
db:
condition: service_healthy
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/v1/health"]
test: ["CMD", "curl", "-f", "http://localhost:4848/keepalive"]
interval: 10s
timeout: 5s
retries: 5
@ -189,17 +190,16 @@ services:
- "${FRONTEND_PORT:-3929}:3000"
environment:
NEXT_PUBLIC_FASTAPI_BACKEND_URL: ${NEXT_PUBLIC_FASTAPI_BACKEND_URL:-http://localhost:${BACKEND_PORT:-8929}}
NEXT_PUBLIC_ELECTRIC_URL: ${NEXT_PUBLIC_ELECTRIC_URL:-http://localhost:${ELECTRIC_PORT:-5929}}
NEXT_PUBLIC_ZERO_CACHE_URL: ${NEXT_PUBLIC_ZERO_CACHE_URL:-http://localhost:${ZERO_CACHE_PORT:-5929}}
NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE: ${AUTH_TYPE:-LOCAL}
NEXT_PUBLIC_ETL_SERVICE: ${ETL_SERVICE:-DOCLING}
NEXT_PUBLIC_DEPLOYMENT_MODE: ${DEPLOYMENT_MODE:-self-hosted}
NEXT_PUBLIC_ELECTRIC_AUTH_MODE: ${NEXT_PUBLIC_ELECTRIC_AUTH_MODE:-insecure}
labels:
- "com.centurylinklabs.watchtower.enable=true"
depends_on:
backend:
condition: service_healthy
electric:
zero-cache:
condition: service_healthy
restart: unless-stopped
@ -210,3 +210,5 @@ volumes:
name: surfsense-redis
shared_temp:
name: surfsense-shared-temp
zero_cache_data:
name: surfsense-zero-cache

View file

@ -1,11 +1,11 @@
# PostgreSQL configuration for Electric SQL
# PostgreSQL configuration for SurfSense
# This file is mounted into the PostgreSQL container
listen_addresses = '*'
max_connections = 200
shared_buffers = 256MB
# Enable logical replication (required for Electric SQL)
# Enable logical replication (required for Zero-cache real-time sync)
wal_level = logical
max_replication_slots = 10
max_wal_senders = 10

View file

@ -1,38 +0,0 @@
#!/bin/sh
# Creates the Electric SQL replication user on first DB initialization.
# Idempotent — safe to run alongside Alembic migration 66.
set -e
ELECTRIC_DB_USER="${ELECTRIC_DB_USER:-electric}"
ELECTRIC_DB_PASSWORD="${ELECTRIC_DB_PASSWORD:-electric_password}"
echo "Creating Electric SQL replication user: $ELECTRIC_DB_USER"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
DO \$\$
BEGIN
IF NOT EXISTS (SELECT FROM pg_user WHERE usename = '$ELECTRIC_DB_USER') THEN
CREATE USER $ELECTRIC_DB_USER WITH REPLICATION PASSWORD '$ELECTRIC_DB_PASSWORD';
END IF;
END
\$\$;
GRANT CONNECT ON DATABASE $POSTGRES_DB TO $ELECTRIC_DB_USER;
GRANT CREATE ON DATABASE $POSTGRES_DB TO $ELECTRIC_DB_USER;
GRANT USAGE ON SCHEMA public TO $ELECTRIC_DB_USER;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO $ELECTRIC_DB_USER;
GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO $ELECTRIC_DB_USER;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO $ELECTRIC_DB_USER;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON SEQUENCES TO $ELECTRIC_DB_USER;
DO \$\$
BEGIN
IF NOT EXISTS (SELECT FROM pg_publication WHERE pubname = 'electric_publication_default') THEN
CREATE PUBLICATION electric_publication_default;
END IF;
END
\$\$;
EOSQL
echo "Electric SQL user '$ELECTRIC_DB_USER' and publication created successfully"

View file

@ -109,7 +109,6 @@ $Files = @(
@{ Src = "docker/docker-compose.yml"; Dest = "docker-compose.yml" }
@{ Src = "docker/.env.example"; Dest = ".env.example" }
@{ Src = "docker/postgresql.conf"; Dest = "postgresql.conf" }
@{ Src = "docker/scripts/init-electric-user.sh"; Dest = "scripts/init-electric-user.sh" }
@{ Src = "docker/scripts/migrate-database.ps1"; Dest = "scripts/migrate-database.ps1" }
@{ Src = "docker/searxng/settings.yml"; Dest = "searxng/settings.yml" }
@{ Src = "docker/searxng/limiter.toml"; Dest = "searxng/limiter.toml" }

View file

@ -108,7 +108,6 @@ FILES=(
"docker/docker-compose.yml:docker-compose.yml"
"docker/.env.example:.env.example"
"docker/postgresql.conf:postgresql.conf"
"docker/scripts/init-electric-user.sh:scripts/init-electric-user.sh"
"docker/scripts/migrate-database.sh:scripts/migrate-database.sh"
"docker/searxng/settings.yml:searxng/settings.yml"
"docker/searxng/limiter.toml:searxng/limiter.toml"
@ -122,7 +121,6 @@ for entry in "${FILES[@]}"; do
|| error "Failed to download ${dest}. Check your internet connection and try again."
done
chmod +x "${INSTALL_DIR}/scripts/init-electric-user.sh"
chmod +x "${INSTALL_DIR}/scripts/migrate-database.sh"
success "All files downloaded to ${INSTALL_DIR}/"

View file

@ -17,10 +17,6 @@ REDIS_APP_URL=redis://localhost:6379/0
# Only uncomment if running the backend outside Docker (e.g. uvicorn on host).
# SEARXNG_DEFAULT_HOST=http://localhost:8888
#Electric(for migrations only)
ELECTRIC_DB_USER=electric
ELECTRIC_DB_PASSWORD=electric_password
# Periodic task interval
# # Run every minute (default)
# SCHEDULE_CHECKER_INTERVAL=1m

View file

@ -25,13 +25,6 @@ database_url = os.getenv("DATABASE_URL")
if database_url:
config.set_main_option("sqlalchemy.url", database_url)
# Electric SQL user credentials - centralized configuration for migrations
# These are used by migrations that set up Electric SQL replication
config.set_main_option("electric_db_user", os.getenv("ELECTRIC_DB_USER", "electric"))
config.set_main_option(
"electric_db_password", os.getenv("ELECTRIC_DB_PASSWORD", "electric_password")
)
# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name is not None:

View file

@ -30,21 +30,25 @@ def upgrade() -> None:
"ix_notifications_user_read_type_created",
"notifications",
["user_id", "read", "type", "created_at"],
if_not_exists=True,
)
op.create_index(
"ix_notifications_user_space_created",
"notifications",
["user_id", "search_space_id", "created_at"],
if_not_exists=True,
)
op.create_index(
"ix_notifications_type",
"notifications",
["type"],
if_not_exists=True,
)
op.create_index(
"ix_notifications_search_space_id",
"notifications",
["search_space_id"],
if_not_exists=True,
)

View file

@ -35,52 +35,60 @@ def upgrade() -> None:
END $$;
""")
op.create_table(
"video_presentations",
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("title", sa.String(length=500), nullable=False),
sa.Column("slides", JSONB(), nullable=True),
sa.Column("scene_codes", JSONB(), nullable=True),
sa.Column(
"status",
video_presentation_status_enum,
server_default="ready",
nullable=False,
),
sa.Column("search_space_id", sa.Integer(), nullable=False),
sa.Column("thread_id", sa.Integer(), nullable=True),
sa.Column(
"created_at",
sa.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.ForeignKeyConstraint(
["search_space_id"],
["searchspaces.id"],
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["thread_id"],
["new_chat_threads.id"],
ondelete="SET NULL",
),
sa.PrimaryKeyConstraint("id"),
conn = op.get_bind()
result = conn.execute(
sa.text("SELECT 1 FROM information_schema.tables WHERE table_name = 'video_presentations'")
)
if not result.fetchone():
op.create_table(
"video_presentations",
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("title", sa.String(length=500), nullable=False),
sa.Column("slides", JSONB(), nullable=True),
sa.Column("scene_codes", JSONB(), nullable=True),
sa.Column(
"status",
video_presentation_status_enum,
server_default="ready",
nullable=False,
),
sa.Column("search_space_id", sa.Integer(), nullable=False),
sa.Column("thread_id", sa.Integer(), nullable=True),
sa.Column(
"created_at",
sa.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.ForeignKeyConstraint(
["search_space_id"],
["searchspaces.id"],
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["thread_id"],
["new_chat_threads.id"],
ondelete="SET NULL",
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
"ix_video_presentations_status",
"video_presentations",
["status"],
if_not_exists=True,
)
op.create_index(
"ix_video_presentations_thread_id",
"video_presentations",
["thread_id"],
if_not_exists=True,
)
op.create_index(
"ix_video_presentations_created_at",
"video_presentations",
["created_at"],
if_not_exists=True,
)

View file

@ -0,0 +1,104 @@
"""Clean up Electric SQL artifacts (user, publication, replication slots)
Revision ID: 108
Revises: 107
Removes leftover Electric SQL infrastructure that is no longer needed after
the migration to Rocicorp Zero. Fully idempotent and safe on databases that
never had Electric SQL set up (fresh installs).
Cleans up:
- Replication slots containing 'electric' (prevents unbounded WAL growth)
- The 'electric_publication_default' publication
- Default privileges, grants, and the 'electric' database user
"""
from collections.abc import Sequence
from alembic import op
revision: str = "108"
down_revision: str | None = "107"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
def upgrade() -> None:
op.execute(
"""
DO $$
DECLARE
slot RECORD;
BEGIN
-- 1. Drop inactive Electric replication slots (prevents WAL growth)
FOR slot IN
SELECT slot_name FROM pg_replication_slots
WHERE slot_name LIKE '%electric%' AND active = false
LOOP
BEGIN
PERFORM pg_drop_replication_slot(slot.slot_name);
EXCEPTION WHEN OTHERS THEN
RAISE WARNING 'Could not drop replication slot %: %', slot.slot_name, SQLERRM;
END;
END LOOP;
-- Warn about active Electric slots that cannot be safely dropped
FOR slot IN
SELECT slot_name FROM pg_replication_slots
WHERE slot_name LIKE '%electric%' AND active = true
LOOP
RAISE WARNING 'Active Electric replication slot "%" was not dropped — drop it manually to stop WAL growth', slot.slot_name;
END LOOP;
-- 2. Drop the Electric publication
BEGIN
IF EXISTS (SELECT 1 FROM pg_publication WHERE pubname = 'electric_publication_default') THEN
DROP PUBLICATION electric_publication_default;
END IF;
EXCEPTION WHEN OTHERS THEN
RAISE WARNING 'Could not drop publication electric_publication_default: %', SQLERRM;
END;
-- 3. Revoke privileges and drop the Electric user
IF EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'electric') THEN
BEGIN
ALTER DEFAULT PRIVILEGES IN SCHEMA public
REVOKE SELECT ON TABLES FROM electric;
ALTER DEFAULT PRIVILEGES IN SCHEMA public
REVOKE SELECT ON SEQUENCES FROM electric;
EXCEPTION WHEN OTHERS THEN
RAISE WARNING 'Could not revoke default privileges from electric: %', SQLERRM;
END;
BEGIN
REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM electric;
REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM electric;
REVOKE USAGE ON SCHEMA public FROM electric;
EXCEPTION WHEN OTHERS THEN
RAISE WARNING 'Could not revoke schema privileges from electric: %', SQLERRM;
END;
BEGIN
EXECUTE format(
'REVOKE CONNECT ON DATABASE %I FROM electric',
current_database()
);
EXCEPTION WHEN OTHERS THEN
RAISE WARNING 'Could not revoke CONNECT from electric: %', SQLERRM;
END;
BEGIN
REASSIGN OWNED BY electric TO CURRENT_USER;
DROP ROLE electric;
EXCEPTION WHEN OTHERS THEN
RAISE WARNING 'Could not drop role electric: %', SQLERRM;
END;
END IF;
END
$$;
"""
)
def downgrade() -> None:
pass

View file

@ -722,7 +722,7 @@ class ChatComment(BaseModel, TimestampMixin):
nullable=False,
index=True,
)
# Denormalized thread_id for efficient Electric SQL subscriptions (one per thread)
# Denormalized thread_id for efficient Zero subscriptions (one per thread)
thread_id = Column(
Integer,
ForeignKey("new_chat_threads.id", ondelete="CASCADE"),
@ -792,7 +792,7 @@ class ChatCommentMention(BaseModel, TimestampMixin):
class ChatSessionState(BaseModel):
"""
Tracks real-time session state for shared chat collaboration.
One record per thread, synced via Electric SQL.
One record per thread, synced via Zero.
"""
__tablename__ = "chat_session_state"
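Both comments above describe the same pattern: denormalizing thread_id lets a client hold a single live subscription per thread instead of joining comments through messages. A hedged sketch of what that filtered query could look like, assuming Zero's React bindings and ad-hoc ZQL access; the app actually routes named synced queries through /api/zero/query, and the table/column names here are inferred from the model, so treat this as illustrative:
import { useQuery, useZero } from "@rocicorp/zero/react";
// One subscription covers every comment in a thread, thanks to the
// denormalized thread_id column (no join through messages needed).
function useThreadComments(threadId: number) {
const z = useZero();
const [comments] = useQuery(
z.query.chat_comments.where("thread_id", threadId).orderBy("created_at", "asc")
);
return comments;
}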

View file

@ -80,7 +80,7 @@ router.include_router(model_list_router) # Dynamic LLM model catalogue from Ope
router.include_router(logs_router)
router.include_router(circleback_webhook_router) # Circleback meeting webhooks
router.include_router(surfsense_docs_router) # Surfsense documentation for citations
router.include_router(notifications_router) # Notifications with Electric SQL sync
router.include_router(notifications_router) # Notifications with Zero sync
router.include_router(composio_router) # Composio OAuth and toolkit management
router.include_router(public_chat_router) # Public chat sharing and cloning
router.include_router(incentive_tasks_router) # Incentive tasks for earning free pages

View file

@ -128,7 +128,7 @@ async def create_documents_file_upload(
Upload files as documents with real-time status tracking.
Implements 2-phase document status updates for real-time UI feedback:
- Phase 1: Create all documents with 'pending' status (visible in UI immediately via ElectricSQL)
- Phase 1: Create all documents with 'pending' status (visible in UI immediately via Zero)
- Phase 2: Celery processes each file: pending → processing → ready/failed
Requires DOCUMENTS_CREATE permission.
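The status flow in phase 2 is small enough to state exactly. A sketch of the transitions as a type-level model; this is illustrative only, not application code:
type DocumentStatus = "pending" | "processing" | "ready" | "failed";
// Phase 1 creates documents as "pending"; phase 2 (Celery) advances them.
const allowedTransitions: Record<DocumentStatus, DocumentStatus[]> = {
pending: ["processing"],
processing: ["ready", "failed"],
ready: [],
failed: [],
};
function canTransition(from: DocumentStatus, to: DocumentStatus): boolean {
return allowedTransitions[from].includes(to);
}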

View file

@ -1,7 +1,7 @@
"""
Notifications API routes.
These endpoints allow marking notifications as read and fetching older notifications.
Electric SQL automatically syncs the changes to all connected clients for recent items.
Zero automatically syncs the changes to all connected clients for recent items.
For older items (beyond the sync window), use the list endpoint.
"""
@ -267,7 +267,7 @@ async def get_unread_count(
This allows the frontend to calculate:
- older_unread = total_unread - recent_unread (static until reconciliation)
- Display count = older_unread + live_recent_count (from Electric SQL)
- Display count = older_unread + live_recent_count (from Zero)
"""
# Calculate cutoff date for sync window
cutoff_date = datetime.now(UTC) - timedelta(days=SYNC_WINDOW_DAYS)
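Spelled out, the client-side arithmetic this endpoint enables is the following; a sketch, with illustrative function and parameter names:
// older_unread stays fixed between reconciliations, while the recent count
// updates live through the Zero subscription.
function displayUnreadCount(
totalUnread: number, // from this endpoint: all history
recentUnread: number, // from this endpoint: inside the sync window
liveRecentCount: number // live value from the Zero subscription
): number {
const olderUnread = totalUnread - recentUnread;
return olderUnread + liveRecentCount;
}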
@ -344,7 +344,7 @@ async def list_notifications(
List notifications for the current user with pagination.
This endpoint is used as a fallback for older notifications that are
outside the Electric SQL sync window (2 weeks).
outside the Zero sync window (2 weeks).
Use `before_date` to paginate through older notifications efficiently.
"""
@ -487,7 +487,7 @@ async def mark_notification_as_read(
"""
Mark a single notification as read.
Electric SQL will automatically sync this change to all connected clients.
Zero will automatically sync this change to all connected clients.
"""
# Verify the notification belongs to the user
result = await session.execute(
@ -528,7 +528,7 @@ async def mark_all_notifications_as_read(
"""
Mark all notifications as read for the current user.
Electric SQL will automatically sync these changes to all connected clients.
Zero will automatically sync these changes to all connected clients.
"""
# Update all unread notifications for the user
result = await session.execute(

View file

@ -1543,7 +1543,7 @@ async def _run_indexing_with_notifications(
)
await (
session.commit()
) # Commit to ensure Electric SQL syncs the notification update
) # Commit to ensure Zero syncs the notification update
elif documents_processed > 0:
# Update notification to storing stage
if notification:
@ -1570,7 +1570,7 @@ async def _run_indexing_with_notifications(
)
await (
session.commit()
) # Commit to ensure Electric SQL syncs the notification update
) # Commit to ensure Zero syncs the notification update
else:
# No new documents processed - check if this is an error or just no changes
if error_or_warning:
@ -1596,7 +1596,7 @@ async def _run_indexing_with_notifications(
if is_duplicate_warning or is_empty_result or is_info_warning:
# These are success cases - sync worked, just found nothing new
logger.info(f"Indexing completed successfully: {error_or_warning}")
# Still update timestamp so ElectricSQL syncs and clears "Syncing" UI
# Still update timestamp so Zero syncs and clears "Syncing" UI
if update_timestamp_func:
await update_timestamp_func(session, connector_id)
await session.commit() # Commit timestamp update
@ -1619,7 +1619,7 @@ async def _run_indexing_with_notifications(
)
await (
session.commit()
) # Commit to ensure Electric SQL syncs the notification update
) # Commit to ensure Zero syncs the notification update
else:
# Actual failure
logger.error(f"Indexing failed: {error_or_warning}")
@ -1637,13 +1637,13 @@ async def _run_indexing_with_notifications(
)
await (
session.commit()
) # Commit to ensure Electric SQL syncs the notification update
) # Commit to ensure Zero syncs the notification update
else:
# Success - just no new documents to index (all skipped/unchanged)
logger.info(
"Indexing completed: No new documents to process (all up to date)"
)
# Still update timestamp so ElectricSQL syncs and clears "Syncing" UI
# Still update timestamp so Zero syncs and clears "Syncing" UI
if update_timestamp_func:
await update_timestamp_func(session, connector_id)
await session.commit() # Commit timestamp update
@ -1659,7 +1659,7 @@ async def _run_indexing_with_notifications(
)
await (
session.commit()
) # Commit to ensure Electric SQL syncs the notification update
) # Commit to ensure Zero syncs the notification update
except SoftTimeLimitExceeded:
# Celery soft time limit was reached - task is about to be killed
# Gracefully save progress and mark as interrupted
@ -2776,7 +2776,7 @@ async def run_composio_indexing(
Run Composio connector indexing with real-time notifications.
This wraps the Composio indexer with the notification system so that
Electric SQL can sync indexing progress to the frontend in real-time.
Zero can sync indexing progress to the frontend in real-time.
Args:
session: Database session

View file

@ -456,7 +456,7 @@ async def create_comment(
thread = message.thread
comment = ChatComment(
message_id=message_id,
thread_id=thread.id, # Denormalized for efficient Electric subscriptions
thread_id=thread.id, # Denormalized for efficient per-thread sync
author_id=user.id,
content=content,
)
@ -569,7 +569,7 @@ async def create_reply(
thread = parent_comment.message.thread
reply = ChatComment(
message_id=parent_comment.message_id,
thread_id=thread.id, # Denormalized for efficient Electric subscriptions
thread_id=thread.id, # Denormalized for efficient per-thread sync
parent_id=comment_id,
author_id=user.id,
content=content,

View file

@ -1,4 +1,4 @@
"""Service for creating and managing notifications with Electric SQL sync."""
"""Service for creating and managing notifications with Zero sync."""
import logging
from datetime import UTC, datetime
@ -1045,7 +1045,7 @@ class PageLimitNotificationHandler(BaseNotificationHandler):
class NotificationService:
"""Service for creating and managing notifications that sync via Electric SQL."""
"""Service for creating and managing notifications that sync via Zero."""
# Handler instances
connector_indexing = ConnectorIndexingNotificationHandler()
@ -1065,7 +1065,7 @@ class NotificationService:
notification_metadata: dict[str, Any] | None = None,
) -> Notification:
"""
Create a notification - Electric SQL will automatically sync it to frontend.
Create a notification - Zero will automatically sync it to frontend.
Args:
session: Database session

View file

@ -887,7 +887,7 @@ async def _process_file_with_document(
)
try:
# Set status to PROCESSING (shows spinner in UI via ElectricSQL)
# Set status to PROCESSING (shows spinner in UI via Zero)
document.status = DocumentStatus.processing()
await session.commit()
logger.info(
@ -951,7 +951,7 @@ async def _process_file_with_document(
):
page_limit_error = e.__cause__
# Mark document as failed (shows error in UI via ElectricSQL)
# Mark document as failed (shows error in UI via Zero)
error_message = str(e)[:500]
document.status = DocumentStatus.failed(error_message)
document.updated_at = get_current_timestamp()

View file

@ -139,7 +139,7 @@ async def index_airtable_records(
await task_logger.log_task_success(
log_entry, success_msg, {"bases_count": 0}
)
# CRITICAL: Update timestamp even when no bases found so Electric SQL syncs
# CRITICAL: Update timestamp even when no bases found so Zero syncs
await update_connector_last_indexed(
session, connector, update_last_indexed
)
@ -460,7 +460,7 @@ async def index_airtable_records(
documents_failed += 1
continue
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
await update_connector_last_indexed(session, connector, update_last_indexed)
total_processed = documents_indexed

View file

@ -462,7 +462,7 @@ async def index_bookstack_pages(
documents_failed += 1
continue
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
# This ensures the UI shows "Last indexed" instead of "Never indexed"
await update_connector_last_indexed(session, connector, update_last_indexed)

View file

@ -470,7 +470,7 @@ async def index_clickup_tasks(
total_processed = documents_indexed
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
# This ensures the UI shows "Last indexed" instead of "Never indexed"
await update_connector_last_indexed(session, connector, update_last_indexed)

View file

@ -442,7 +442,7 @@ async def index_confluence_pages(
documents_failed += 1
continue # Skip this page and continue with others
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
# This ensures the UI shows "Last indexed" instead of "Never indexed"
await update_connector_last_indexed(session, connector, update_last_indexed)

View file

@ -718,7 +718,7 @@ async def index_discord_messages(
documents_failed += 1
continue
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
await update_connector_last_indexed(session, connector, update_last_indexed)
# Final commit for any remaining documents not yet committed in batches

View file

@ -413,7 +413,7 @@ async def index_elasticsearch_documents(
documents_failed += 1
continue
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
# This ensures the UI shows "Last indexed" instead of "Never indexed"
if update_last_indexed:
connector.last_indexed_at = (

View file

@ -451,7 +451,7 @@ async def index_github_repos(
documents_failed += 1
continue
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
await update_connector_last_indexed(session, connector, update_last_indexed)
# Final commit

View file

@ -599,7 +599,7 @@ async def index_google_calendar_events(
documents_failed += 1
continue
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
await update_connector_last_indexed(session, connector, update_last_indexed)
# Final commit for any remaining documents not yet committed in batches

View file

@ -519,7 +519,7 @@ async def index_google_gmail_messages(
documents_failed += 1
continue
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
await update_connector_last_indexed(session, connector, update_last_indexed)
# Final commit for any remaining documents not yet committed in batches

View file

@ -422,7 +422,7 @@ async def index_jira_issues(
documents_failed += 1
continue # Skip this issue and continue with others
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
# This ensures the UI shows "Last indexed" instead of "Never indexed"
await update_connector_last_indexed(session, connector, update_last_indexed)

View file

@ -463,7 +463,7 @@ async def index_linear_issues(
documents_failed += 1
continue
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
await update_connector_last_indexed(session, connector, update_last_indexed)
# Final commit for any remaining documents not yet committed in batches

View file

@ -520,7 +520,7 @@ async def index_luma_events(
documents_failed += 1
continue
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
# This ensures the UI shows "Last indexed" instead of "Never indexed"
await update_connector_last_indexed(session, connector, update_last_indexed)

View file

@ -252,7 +252,7 @@ async def index_notion_pages(
{"pages_found": 0},
)
logger.info("No Notion pages found to index")
# CRITICAL: Update timestamp even when no pages found so Electric SQL syncs
# CRITICAL: Update timestamp even when no pages found so Zero syncs
await update_connector_last_indexed(session, connector, update_last_indexed)
await session.commit()
await notion_client.close()
@ -506,7 +506,7 @@ async def index_notion_pages(
documents_failed += 1
continue
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
await update_connector_last_indexed(session, connector, update_last_indexed)
total_processed = documents_indexed

View file

@ -599,7 +599,7 @@ async def index_obsidian_vault(
failed_count += 1
continue
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
await update_connector_last_indexed(session, connector, update_last_indexed)
# Final commit for any remaining documents not yet committed in batches

View file

@ -256,7 +256,7 @@ async def index_slack_messages(
f"No Slack channels found for connector {connector_id}",
{"channels_found": 0},
)
# CRITICAL: Update timestamp even when no channels found so Electric SQL syncs
# CRITICAL: Update timestamp even when no channels found so Zero syncs
await update_connector_last_indexed(session, connector, update_last_indexed)
await session.commit()
return 0, None # Return None (not error) when no channels found
@ -593,7 +593,7 @@ async def index_slack_messages(
documents_failed += 1
continue
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
await update_connector_last_indexed(session, connector, update_last_indexed)
# Final commit for any remaining documents not yet committed in batches

View file

@ -249,7 +249,7 @@ async def index_teams_messages(
f"No Teams found for connector {connector_id}",
{"teams_found": 0},
)
# CRITICAL: Update timestamp even when no teams found so Electric SQL syncs
# CRITICAL: Update timestamp even when no teams found so Zero syncs
await update_connector_last_indexed(session, connector, update_last_indexed)
await session.commit()
return 0, None # Return None (not error) when no items found
@ -635,7 +635,7 @@ async def index_teams_messages(
documents_failed += 1
continue
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
await update_connector_last_indexed(session, connector, update_last_indexed)
# Final commit for any remaining documents not yet committed in batches

View file

@ -444,7 +444,7 @@ async def index_crawled_urls(
total_processed = documents_indexed + documents_updated
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Electric SQL syncs
# CRITICAL: Always update timestamp (even if 0 documents indexed) so Zero syncs
await update_connector_last_indexed(session, connector, update_last_indexed)
# Final commit for any remaining documents not yet committed in batches

View file

@ -10,7 +10,7 @@ document upload pipeline. It includes various markdown formatting elements.
- Document upload and processing
- Automatic chunking of content
- Embedding generation for semantic search
- Real-time status tracking via ElectricSQL
- Real-time status tracking via Zero
## Technical Architecture

View file

@ -1,10 +1,7 @@
NEXT_PUBLIC_FASTAPI_BACKEND_URL=http://localhost:8000
NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=LOCAL or GOOGLE
NEXT_PUBLIC_ETL_SERVICE=UNSTRUCTURED or LLAMACLOUD or DOCLING
# Electric SQL
NEXT_PUBLIC_ELECTRIC_URL=http://localhost:5133
NEXT_PUBLIC_ELECTRIC_AUTH_MODE=insecure
NEXT_PUBLIC_ZERO_CACHE_URL=http://localhost:4848
# Contact Form Vars (optional)
DATABASE_URL=postgresql://postgres:[YOUR-PASSWORD]@db.sdsf.supabase.co:5432/postgres

View file

@ -35,15 +35,13 @@ RUN corepack enable pnpm
ARG NEXT_PUBLIC_FASTAPI_BACKEND_URL=__NEXT_PUBLIC_FASTAPI_BACKEND_URL__
ARG NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=__NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE__
ARG NEXT_PUBLIC_ETL_SERVICE=__NEXT_PUBLIC_ETL_SERVICE__
ARG NEXT_PUBLIC_ELECTRIC_URL=__NEXT_PUBLIC_ELECTRIC_URL__
ARG NEXT_PUBLIC_ELECTRIC_AUTH_MODE=__NEXT_PUBLIC_ELECTRIC_AUTH_MODE__
ARG NEXT_PUBLIC_ZERO_CACHE_URL=__NEXT_PUBLIC_ZERO_CACHE_URL__
ARG NEXT_PUBLIC_DEPLOYMENT_MODE=__NEXT_PUBLIC_DEPLOYMENT_MODE__
ENV NEXT_PUBLIC_FASTAPI_BACKEND_URL=$NEXT_PUBLIC_FASTAPI_BACKEND_URL
ENV NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=$NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE
ENV NEXT_PUBLIC_ETL_SERVICE=$NEXT_PUBLIC_ETL_SERVICE
ENV NEXT_PUBLIC_ELECTRIC_URL=$NEXT_PUBLIC_ELECTRIC_URL
ENV NEXT_PUBLIC_ELECTRIC_AUTH_MODE=$NEXT_PUBLIC_ELECTRIC_AUTH_MODE
ENV NEXT_PUBLIC_ZERO_CACHE_URL=$NEXT_PUBLIC_ZERO_CACHE_URL
ENV NEXT_PUBLIC_DEPLOYMENT_MODE=$NEXT_PUBLIC_DEPLOYMENT_MODE
COPY --from=deps /app/node_modules ./node_modules

View file

@ -0,0 +1,5 @@
import { NextResponse } from "next/server";
// No-op mutate endpoint: SurfSense does not use Zero custom mutators, but
// zero-cache requires ZERO_MUTATE_URL when auth tokens are used. Returning
// an empty array tells zero-cache there is nothing to apply.
export async function POST() {
return NextResponse.json([]);
}

View file

@ -0,0 +1,50 @@
import { mustGetQuery } from "@rocicorp/zero";
import { handleQueryRequest } from "@rocicorp/zero/server";
import { NextResponse } from "next/server";
import type { Context } from "@/types/zero";
import { queries } from "@/zero/queries";
import { schema } from "@/zero/schema";
const backendURL = process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL || "http://localhost:8000";
async function authenticateRequest(
request: Request
): Promise<{ ctx?: Context; error?: never } | { ctx?: never; error: NextResponse }> {
const authHeader = request.headers.get("Authorization");
if (!authHeader?.startsWith("Bearer ")) {
// No bearer token: continue with an anonymous (undefined) context
return { ctx: undefined };
}
try {
const res = await fetch(`${backendURL}/users/me`, {
headers: { Authorization: authHeader },
});
if (!res.ok) {
return { error: NextResponse.json({ error: "Unauthorized" }, { status: 401 }) };
}
const user = await res.json();
return { ctx: { userId: String(user.id) } };
} catch {
return { error: NextResponse.json({ error: "Auth service unavailable" }, { status: 503 }) };
}
}
export async function POST(request: Request) {
// zero-cache forwards the client's Authorization header with every query
// request; verify it against the FastAPI backend before resolving queries.
const auth = await authenticateRequest(request);
if (auth.error) {
return auth.error;
}
// Resolve the named synced query with the authenticated user's context and
// hand the result back to zero-cache.
const result = await handleQueryRequest(
(name, args) => {
const query = mustGetQuery(queries, name);
return query.fn({ args, ctx: auth.ctx });
},
schema,
request
);
return NextResponse.json(result);
}

View file

@ -40,7 +40,7 @@ import { MobileHitlEditPanel } from "@/components/hitl-edit-panel/hitl-edit-pane
import { MobileReportPanel } from "@/components/report-panel/report-panel";
import { Skeleton } from "@/components/ui/skeleton";
import { useChatSessionStateSync } from "@/hooks/use-chat-session-state";
import { useMessagesElectric } from "@/hooks/use-messages-electric";
import { useMessagesSync } from "@/hooks/use-messages-sync";
import { documentsApiService } from "@/lib/apis/documents-api.service";
import { getBearerToken } from "@/lib/auth-utils";
import { convertToThreadMessage } from "@/lib/chat/message-utils";
@ -192,13 +192,13 @@ export default function NewChatPage() {
// Get current user for author info in shared chats
const { data: currentUser } = useAtomValue(currentUserAtom);
// Live collaboration: sync session state and messages via Electric SQL
// Live collaboration: sync session state and messages via Zero
useChatSessionStateSync(threadId);
const { data: membersData } = useAtomValue(membersAtom);
const handleElectricMessagesUpdate = useCallback(
const handleSyncedMessagesUpdate = useCallback(
(
electricMessages: {
syncedMessages: {
id: number;
thread_id: number;
role: string;
@ -212,11 +212,11 @@ export default function NewChatPage() {
}
setMessages((prev) => {
if (electricMessages.length < prev.length) {
if (syncedMessages.length < prev.length) {
return prev;
}
return electricMessages.map((msg) => {
return syncedMessages.map((msg) => {
const member = msg.author_id
? membersData?.find((m) => m.user_id === msg.author_id)
: null;
@ -243,7 +243,7 @@ export default function NewChatPage() {
[isRunning, membersData]
);
useMessagesElectric(threadId, handleElectricMessagesUpdate);
useMessagesSync(threadId, handleSyncedMessagesUpdate);
// Extract search_space_id from URL params
const searchSpaceId = useMemo(() => {
@ -266,6 +266,7 @@ export default function NewChatPage() {
// Initialize thread and load messages
// For new chats (no urlChatId), we use lazy creation - thread is created on first message
// biome-ignore lint/correctness/useExhaustiveDependencies: searchSpaceId triggers re-init when switching spaces with the same urlChatId
const initializeThread = useCallback(async () => {
setIsInitializing(true);

View file

@ -3,10 +3,10 @@ import "./globals.css";
import { RootProvider } from "fumadocs-ui/provider/next";
import { Roboto } from "next/font/google";
import { AnnouncementToastProvider } from "@/components/announcements/AnnouncementToastProvider";
import { ElectricProvider } from "@/components/providers/ElectricProvider";
import { GlobalLoadingProvider } from "@/components/providers/GlobalLoadingProvider";
import { I18nProvider } from "@/components/providers/I18nProvider";
import { PostHogProvider } from "@/components/providers/PostHogProvider";
import { ZeroProvider } from "@/components/providers/ZeroProvider";
import { ThemeProvider } from "@/components/theme/theme-provider";
import { Toaster } from "@/components/ui/sonner";
import { LocaleProvider } from "@/contexts/LocaleContext";
@ -141,9 +141,9 @@ export default function RootLayout({
>
<RootProvider>
<ReactQueryClientProvider>
<ElectricProvider>
<ZeroProvider>
<GlobalLoadingProvider>{children}</GlobalLoadingProvider>
</ElectricProvider>
</ZeroProvider>
</ReactQueryClientProvider>
<Toaster />
<AnnouncementToastProvider />

View file

@ -213,7 +213,7 @@ export default function sitemap(): MetadataRoute.Sitemap {
},
// How-to documentation
{
url: "https://www.surfsense.com/docs/how-to/electric-sql",
url: "https://www.surfsense.com/docs/how-to/zero-sync",
lastModified,
changeFrequency: "daily",
priority: 0.8,

View file

@ -16,7 +16,6 @@ import {
} from "@/components/ui/dropdown-menu";
import { Spinner } from "@/components/ui/spinner";
import { logout } from "@/lib/auth-utils";
import { cleanupElectric } from "@/lib/electric/client";
import { resetUser, trackLogout } from "@/lib/posthog/events";
export function UserDropdown({
@ -39,14 +38,6 @@ export function UserDropdown({
trackLogout();
resetUser();
// Best-effort cleanup of Electric SQL / PGlite
// Even if this fails, login-time cleanup will handle it
try {
await cleanupElectric();
} catch (err) {
console.warn("[Logout] Electric cleanup failed (will be handled on next login):", err);
}
// Revoke refresh token on server and clear all tokens from localStorage
await logout();

View file

@ -20,7 +20,7 @@ import { Dialog, DialogContent, DialogTitle } from "@/components/ui/dialog";
import { Spinner } from "@/components/ui/spinner";
import { Tabs, TabsContent } from "@/components/ui/tabs";
import type { SearchSourceConnector } from "@/contracts/types/connector.types";
import { useConnectorsElectric } from "@/hooks/use-connectors-electric";
import { useConnectorsSync } from "@/hooks/use-connectors-sync";
import { PICKER_CLOSE_EVENT, PICKER_OPEN_EVENT } from "@/hooks/use-google-picker";
import { cn } from "@/lib/utils";
import { ConnectorDialogHeader } from "./connector-popup/components/connector-dialog-header";
@ -155,33 +155,23 @@ export const ConnectorIndicator = forwardRef<ConnectorIndicatorHandle, Connector
};
}, []);
// Fetch connectors using Electric SQL + PGlite for real-time updates
// This provides instant updates when connectors change, without polling
const {
connectors: connectorsFromElectric = [],
connectors: connectorsFromSync = [],
loading: connectorsLoading,
error: connectorsError,
refreshConnectors: refreshConnectorsElectric,
} = useConnectorsElectric(searchSpaceId);
refreshConnectors: refreshConnectorsSync,
} = useConnectorsSync(searchSpaceId);
// Fallback to API if Electric is not available or fails
// Use Electric data if: 1) we have data, or 2) still loading without error
// Use API data if: Electric failed (has error) or finished loading with no data
const useElectricData =
connectorsFromElectric.length > 0 || (connectorsLoading && !connectorsError);
const connectors = useElectricData ? connectorsFromElectric : allConnectors || [];
const useSyncData = connectorsFromSync.length > 0 || (connectorsLoading && !connectorsError);
const connectors = useSyncData ? connectorsFromSync : allConnectors || [];
// Manual refresh function that works with both Electric and API
const refreshConnectors = async () => {
if (useElectricData) {
await refreshConnectorsElectric();
} else {
// Fallback: use allConnectors from useConnectorDialog (which uses connectorsAtom)
// The connectorsAtom will handle refetching if needed
if (useSyncData) {
await refreshConnectorsSync();
}
};
// Track indexing state locally - clears automatically when Electric SQL detects last_indexed_at changed
// Track indexing state locally - clears automatically when last_indexed_at changes via real-time sync
// Also clears when failed notifications are detected
const { indexingConnectorIds, startIndexing, stopIndexing } = useIndexingConnectors(
connectors as SearchSourceConnector[],
@ -202,7 +192,7 @@ export const ConnectorIndicator = forwardRef<ConnectorIndicatorHandle, Connector
const activeConnectorsCount = connectors.length;
// Check which connectors are already connected
// Using Electric SQL + PGlite for real-time connector updates
// Real-time connector updates via Zero sync
const connectedTypes = new Set<string>(
(connectors || []).map((c: SearchSourceConnector) => c.connector_type)
);
@ -291,7 +281,7 @@ export const ConnectorIndicator = forwardRef<ConnectorIndicatorHandle, Connector
<ConnectorAccountsListView
connectorType={viewingAccountsType.connectorType}
connectorTitle={viewingAccountsType.connectorTitle}
connectors={(connectors || []) as SearchSourceConnector[]} // Using Electric SQL + PGlite for real-time connector updates (all connector types)
connectors={(connectors || []) as SearchSourceConnector[]}
indexingConnectorIds={indexingConnectorIds}
onBack={handleBackFromAccountsList}
onManage={handleStartEdit}
@ -323,7 +313,7 @@ export const ConnectorIndicator = forwardRef<ConnectorIndicatorHandle, Connector
...editingConnector,
config: connectorConfig || editingConnector.config,
name: editingConnector.name,
// Sync last_indexed_at with live data from Electric SQL for real-time updates
// Sync last_indexed_at with live data from real-time sync
last_indexed_at:
(connectors as SearchSourceConnector[]).find((c) => c.id === editingConnector.id)
?.last_indexed_at ?? editingConnector.last_indexed_at,

View file

@ -1254,7 +1254,7 @@ export const useConnectorDialog = () => {
queryKey: cacheKeys.logs.summary(Number(searchSpaceId)),
});
// Note: Don't call stopIndexing here - let useIndexingConnectors hook
// detect when last_indexed_at changes via Electric SQL
// detect when last_indexed_at changes via real-time sync
} catch (error) {
console.error("Error indexing connector content:", error);
toast.error(error instanceof Error ? error.message : "Failed to start indexing");

View file

@ -48,13 +48,13 @@ function isTaskTimedOut(startedAt: string | null | undefined): boolean {
*
* This provides a better UX than polling by:
* 1. Setting indexing state immediately when user triggers indexing (optimistic)
* 2. Detecting in_progress notifications from Electric SQL to restore state after remounts
* 2. Detecting in_progress notifications to restore state after remounts
* 3. Clearing indexing state when notifications become completed or failed
* 4. Clearing indexing state when Electric SQL detects last_indexed_at changed
* 4. Clearing indexing state when real-time sync detects last_indexed_at changed
* 5. Detecting stale/stuck tasks that haven't updated in 15+ minutes
* 6. Detecting hard timeout (8h) - tasks that definitely cannot still be running
*
* The actual `last_indexed_at` value comes from Electric SQL/PGlite, not local state.
* The actual `last_indexed_at` value comes from real-time sync, not local state.
*/
export function useIndexingConnectors(
connectors: SearchSourceConnector[],
@ -66,7 +66,7 @@ export function useIndexingConnectors(
// Track previous last_indexed_at values to detect changes
const previousLastIndexedAtRef = useRef<Map<number, string | null>>(new Map());
// Detect when last_indexed_at changes (indexing completed) via Electric SQL
// Detect when last_indexed_at changes (indexing completed) via real-time sync
useEffect(() => {
const previousValues = previousLastIndexedAtRef.current;

View file

@ -81,7 +81,7 @@ import {
} from "@/contracts/enums/toolIcons";
import type { Document } from "@/contracts/types/document.types";
import { useBatchCommentsPreload } from "@/hooks/use-comments";
import { useCommentsElectric } from "@/hooks/use-comments-electric";
import { useCommentsSync } from "@/hooks/use-comments-sync";
import { useMediaQuery } from "@/hooks/use-media-query";
import { cn } from "@/lib/utils";
@ -347,8 +347,8 @@ const Composer: FC = () => {
const respondingToUserId = sessionState?.respondingToUserId ?? null;
const isBlockedByOtherUser = isAiResponding && respondingToUserId !== currentUser?.id;
// Sync comments for the entire thread via Electric SQL (one subscription per thread)
useCommentsElectric(threadId);
// Sync comments for the entire thread via Zero (one subscription per thread)
useCommentsSync(threadId);
// Batch-prefetch comments for all assistant messages so individual useComments
// hooks never fire their own network requests (eliminates N+1 API calls).

View file

@ -161,7 +161,7 @@ export const FeatureIconContainer = ({
);
};
export const Grid = ({ pattern, size }: { pattern?: number[][]; size?: number }) => {
export const Grid = ({ pattern, size }: { pattern?: [number, number][]; size?: number }) => {
const p = pattern ?? [
[9, 3],
[8, 5],
@ -185,7 +185,7 @@ export const Grid = ({ pattern, size }: { pattern?: number[][]; size?: number })
);
};
export function GridPattern({ width, height, x, y, squares, ...props }: any) {
export function GridPattern({ width, height, x, y, squares, ...props }:
React.ComponentProps<"svg"> & {
width: number;
height: number;
x: string | number;
y: string | number;
squares?: [number, number][];
}) {
const patternId = useId();
return (
@ -205,7 +205,7 @@ export function GridPattern({ width, height, x, y, squares, ...props }: any) {
<rect width="100%" height="100%" strokeWidth={0} fill={`url(#${patternId})`} />
{squares && (
<svg aria-hidden="true" x={x} y={y} className="overflow-visible">
{squares.map(([x, y]: any, idx: number) => (
{squares.map(([x, y]: [number, number], idx: number) => (
<rect
strokeWidth="0"
key={`${x}-${y}-${idx}`}

View file

@ -1,14 +1,38 @@
"use client";
import { AnimatePresence, motion } from "motion/react";
import dynamic from "next/dynamic";
import Link from "next/link";
import type React from "react";
import { useEffect, useRef, useState } from "react";
import Balancer from "react-wrap-balancer";
import { HeroCarousel } from "@/components/ui/hero-carousel";
import { AUTH_TYPE, BACKEND_URL } from "@/lib/env-config";
import { trackLoginAttempt } from "@/lib/posthog/events";
import { cn } from "@/lib/utils";
const HeroCarousel = dynamic(
() => import("@/components/ui/hero-carousel").then((m) => ({ default: m.HeroCarousel })),
{
ssr: false,
loading: () => (
<div className="w-full py-4 sm:py-8">
<div className="mx-auto w-full max-w-[900px]">
<div className="overflow-hidden rounded-2xl border border-neutral-200/60 bg-white shadow-xl sm:rounded-3xl dark:border-neutral-700/60 dark:bg-neutral-900">
<div className="flex items-center gap-3 border-b border-neutral-200/60 px-4 py-3 sm:px-6 sm:py-4 dark:border-neutral-700/60">
<div className="min-w-0 flex-1">
<div className="h-5 w-32 animate-pulse rounded bg-neutral-200 dark:bg-neutral-700" />
<div className="mt-2 h-4 w-64 animate-pulse rounded bg-neutral-100 dark:bg-neutral-800" />
</div>
</div>
<div className="bg-neutral-50 p-2 sm:p-3 dark:bg-neutral-950">
<div className="aspect-video w-full animate-pulse rounded-lg bg-neutral-100 sm:rounded-xl dark:bg-neutral-800" />
</div>
</div>
</div>
</div>
),
}
);
// Official Google "G" logo with brand colors
const GoogleLogo = ({ className }: { className?: string }) => (
<svg className={className} viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg">

View file

@@ -54,7 +54,6 @@ import { notificationsApiService } from "@/lib/apis/notifications-api.service";
import { searchSpacesApiService } from "@/lib/apis/search-spaces-api.service";
import { logout } from "@/lib/auth-utils";
import { deleteThread, fetchThreads, updateThread } from "@/lib/chat/thread-persistence";
import { cleanupElectric } from "@/lib/electric/client";
import { resetUser, trackLogout } from "@/lib/posthog/events";
import { cacheKeys } from "@/lib/query-client/cache-keys";
import type { ChatItem, NavItem, SearchSpace } from "../types/layout.types";
@@ -155,8 +154,6 @@ export function LayoutDataProvider({ searchSpaceId, children }: LayoutDataProvid
// Search space dialog state
const [isCreateSearchSpaceDialogOpen, setIsCreateSearchSpaceDialogOpen] = useState(false);
// Per-tab inbox hooks — each has independent API loading, pagination,
// and Electric live queries. The Electric sync shape is shared (client-level cache).
const userId = user?.id ? String(user.id) : null;
const numericSpaceId = Number(searchSpaceId) || null;
@@ -579,14 +576,6 @@ export function LayoutDataProvider({ searchSpaceId, children }: LayoutDataProvid
trackLogout();
resetUser();
// Best-effort cleanup of Electric SQL / PGlite
// Even if this fails, login-time cleanup will handle it
try {
await cleanupElectric();
} catch (err) {
console.warn("[Logout] Electric cleanup failed (will be handled on next login):", err);
}
// Revoke refresh token on server and clear all tokens from localStorage
await logout();

View file

@@ -1,118 +0,0 @@
"use client";
import { useAtomValue } from "jotai";
import { usePathname } from "next/navigation";
import { useEffect, useRef, useState } from "react";
import { currentUserAtom } from "@/atoms/user/user-query.atoms";
import { useGlobalLoadingEffect } from "@/hooks/use-global-loading";
import { getBearerToken } from "@/lib/auth-utils";
import {
cleanupElectric,
type ElectricClient,
initElectric,
isElectricInitialized,
} from "@/lib/electric/client";
import { ElectricContext } from "@/lib/electric/context";
const IS_DEV = process.env.NODE_ENV === "development";
interface ElectricProviderProps {
children: React.ReactNode;
}
/**
* Initializes user-specific PGlite database with Electric SQL sync.
* Handles user isolation, cleanup, and re-initialization on user change.
*/
export function ElectricProvider({ children }: ElectricProviderProps) {
const [electricClient, setElectricClient] = useState<ElectricClient | null>(null);
const [error, setError] = useState<Error | null>(null);
const {
data: user,
isSuccess: isUserLoaded,
isError: isUserError,
} = useAtomValue(currentUserAtom);
const previousUserIdRef = useRef<string | null>(null);
const initializingRef = useRef(false);
const pathname = usePathname();
useEffect(() => {
if (typeof window === "undefined") return;
// No user logged in - cleanup if previous user existed
if (!isUserLoaded || !user?.id) {
if (previousUserIdRef.current && isElectricInitialized()) {
if (IS_DEV) console.log("[ElectricProvider] User logged out, cleaning up...");
cleanupElectric().then(() => {
previousUserIdRef.current = null;
setElectricClient(null);
});
}
return;
}
const userId = String(user.id);
// Skip if already initialized for this user or currently initializing
if ((electricClient && previousUserIdRef.current === userId) || initializingRef.current) {
return;
}
initializingRef.current = true;
let mounted = true;
async function init() {
try {
if (IS_DEV) console.log(`[ElectricProvider] Initializing for user: ${userId}`);
const client = await initElectric(userId);
if (mounted) {
previousUserIdRef.current = userId;
setElectricClient(client);
setError(null);
if (IS_DEV) console.log(`[ElectricProvider] ✅ Ready for user: ${userId}`);
}
} catch (err) {
console.error("[ElectricProvider] Failed to initialize:", err);
if (mounted) {
setError(err instanceof Error ? err : new Error("Failed to initialize Electric SQL"));
setElectricClient(null);
}
} finally {
if (mounted) {
initializingRef.current = false;
}
}
}
init();
return () => {
mounted = false;
};
}, [user?.id, isUserLoaded, electricClient]);
const hasToken = typeof window !== "undefined" && !!getBearerToken();
// Only block UI on dashboard routes; public pages render immediately
const requiresElectricLoading = pathname?.startsWith("/dashboard");
const shouldShowLoading =
hasToken && isUserLoaded && !!user?.id && !electricClient && !error && requiresElectricLoading;
useGlobalLoadingEffect(shouldShowLoading);
// Render immediately for unauthenticated users or failed user queries
if (!hasToken || !isUserLoaded || !user?.id || isUserError) {
return <ElectricContext.Provider value={null}>{children}</ElectricContext.Provider>;
}
// Render with null context while initializing
if (!electricClient && !error) {
return <ElectricContext.Provider value={null}>{children}</ElectricContext.Provider>;
}
if (error) {
console.warn("[ElectricProvider] Initialization failed, sync may not work:", error.message);
}
return <ElectricContext.Provider value={electricClient}>{children}</ElectricContext.Provider>;
}

View file

@@ -0,0 +1,65 @@
"use client";
import {
useConnectionState,
useZero,
ZeroProvider as ZeroReactProvider,
} from "@rocicorp/zero/react";
import { useAtomValue } from "jotai";
import { useEffect, useRef } from "react";
import { currentUserAtom } from "@/atoms/user/user-query.atoms";
import { getBearerToken, handleUnauthorized, refreshAccessToken } from "@/lib/auth-utils";
import { queries } from "@/zero/queries";
import { schema } from "@/zero/schema";
const cacheURL = process.env.NEXT_PUBLIC_ZERO_CACHE_URL || "http://localhost:4848";
function ZeroAuthGuard({ children }: { children: React.ReactNode }) {
const zero = useZero();
const connectionState = useConnectionState();
const isRefreshingRef = useRef(false);
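// When zero-cache reports "needs-auth" (expired or invalid JWT), refresh the
// access token once and reconnect; the ref guards against overlapping refreshes.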
useEffect(() => {
if (connectionState.name !== "needs-auth" || isRefreshingRef.current) return;
isRefreshingRef.current = true;
refreshAccessToken()
.then((newToken) => {
if (newToken) {
zero.connection.connect({ auth: newToken });
} else {
handleUnauthorized();
}
})
.finally(() => {
isRefreshingRef.current = false;
});
}, [connectionState, zero]);
return <>{children}</>;
}
export function ZeroProvider({ children }: { children: React.ReactNode }) {
const { data: user } = useAtomValue(currentUserAtom);
const hasUser = !!user?.id;
const userID = hasUser ? String(user.id) : "anon";
const context = hasUser ? { userId: String(user.id) } : undefined;
const auth = hasUser ? getBearerToken() || undefined : undefined;
const opts = {
userID,
schema,
queries,
context,
cacheURL,
auth,
};
return (
<ZeroReactProvider {...opts}>
{hasUser ? <ZeroAuthGuard>{children}</ZeroAuthGuard> : children}
</ZeroReactProvider>
);
}

View file

@@ -510,93 +510,87 @@ function RolesContent({
<div className="space-y-3">
{roles.map((role) => (
<div key={role.id}>
<RolePermissionsDialog permissions={role.permissions} roleName={role.name}>
<button
type="button"
className="w-full text-left relative flex items-center gap-4 rounded-lg border border-border/60 p-4 transition-colors hover:bg-muted/30 cursor-pointer"
>
<div className="flex-1 min-w-0">
<div className="flex items-center gap-2">
<span className="font-medium text-sm">{role.name}</span>
{role.is_system_role && (
<span className="text-[10px] px-1.5 py-0.5 rounded bg-muted text-muted-foreground font-medium">
System
</span>
)}
{role.is_default && (
<span className="text-[10px] px-1.5 py-0.5 rounded bg-muted text-muted-foreground font-medium">
Default
</span>
<div className="w-full text-left relative flex items-center gap-4 rounded-lg border border-border/60 p-4 transition-colors hover:bg-muted/30">
<div className="flex-1 min-w-0">
<RolePermissionsDialog permissions={role.permissions} roleName={role.name}>
<button type="button" className="w-full text-left cursor-pointer">
<div className="flex items-center gap-2">
<span className="font-medium text-sm">{role.name}</span>
{role.is_system_role && (
<span className="text-[10px] px-1.5 py-0.5 rounded bg-muted text-muted-foreground font-medium">
System
</span>
)}
{role.is_default && (
<span className="text-[10px] px-1.5 py-0.5 rounded bg-muted text-muted-foreground font-medium">
Default
</span>
)}
</div>
{role.description && (
<p className="text-xs text-muted-foreground mt-0.5 truncate">
{role.description}
</p>
)}
</div>
{role.description && (
<p className="text-xs text-muted-foreground mt-0.5 truncate">
{role.description}
</p>
)}
</div>
</RolePermissionsDialog>
</div>
<div className="shrink-0">
<PermissionsBadge permissions={role.permissions} />
</div>
<div className="shrink-0">
<PermissionsBadge permissions={role.permissions} />
</div>
{!role.is_system_role && (
<div
className="shrink-0"
role="none"
onClick={(e) => e.stopPropagation()}
onKeyDown={(e) => e.stopPropagation()}
>
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button variant="ghost" size="icon" className="h-8 w-8">
<MoreHorizontal className="h-4 w-4" />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="end" onCloseAutoFocus={(e) => e.preventDefault()}>
{canUpdate && (
<DropdownMenuItem onClick={() => setEditingRoleId(role.id)}>
<Edit2 className="h-4 w-4 mr-2" />
Edit Role
</DropdownMenuItem>
)}
{canDelete && (
<>
<DropdownMenuSeparator />
<AlertDialog>
<AlertDialogTrigger asChild>
<DropdownMenuItem onSelect={(e) => e.preventDefault()}>
<Trash2 className="h-4 w-4 mr-2" />
Delete Role
</DropdownMenuItem>
</AlertDialogTrigger>
<AlertDialogContent>
<AlertDialogHeader>
<AlertDialogTitle>Delete role?</AlertDialogTitle>
<AlertDialogDescription>
This will permanently delete the &quot;{role.name}&quot; role.
Members with this role will lose their permissions.
</AlertDialogDescription>
</AlertDialogHeader>
<AlertDialogFooter>
<AlertDialogCancel>Cancel</AlertDialogCancel>
<AlertDialogAction
onClick={() => onDeleteRole(role.id)}
className="bg-destructive text-destructive-foreground hover:bg-destructive/90"
>
Delete
</AlertDialogAction>
</AlertDialogFooter>
</AlertDialogContent>
</AlertDialog>
</>
)}
</DropdownMenuContent>
</DropdownMenu>
</div>
)}
</button>
</RolePermissionsDialog>
{!role.is_system_role && (
<div className="shrink-0" role="none">
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button variant="ghost" size="icon" className="h-8 w-8">
<MoreHorizontal className="h-4 w-4" />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="end" onCloseAutoFocus={(e) => e.preventDefault()}>
{canUpdate && (
<DropdownMenuItem onClick={() => setEditingRoleId(role.id)}>
<Edit2 className="h-4 w-4 mr-2" />
Edit Role
</DropdownMenuItem>
)}
{canDelete && (
<>
<DropdownMenuSeparator />
<AlertDialog>
<AlertDialogTrigger asChild>
<DropdownMenuItem onSelect={(e) => e.preventDefault()}>
<Trash2 className="h-4 w-4 mr-2" />
Delete Role
</DropdownMenuItem>
</AlertDialogTrigger>
<AlertDialogContent>
<AlertDialogHeader>
<AlertDialogTitle>Delete role?</AlertDialogTitle>
<AlertDialogDescription>
This will permanently delete the &quot;{role.name}&quot; role.
Members with this role will lose their permissions.
</AlertDialogDescription>
</AlertDialogHeader>
<AlertDialogFooter>
<AlertDialogCancel>Cancel</AlertDialogCancel>
<AlertDialogAction
onClick={() => onDeleteRole(role.id)}
className="bg-destructive text-destructive-foreground hover:bg-destructive/90"
>
Delete
</AlertDialogAction>
</AlertDialogFooter>
</AlertDialogContent>
</AlertDialog>
</>
)}
</DropdownMenuContent>
</DropdownMenu>
</div>
)}
</div>
</div>
))}
</div>

View file

@@ -24,6 +24,7 @@ function formatTime(seconds: number): string {
export function Audio({ id, src, title, durationMs, className }: AudioProps) {
const audioRef = useRef<HTMLAudioElement>(null);
const downloadControllerRef = useRef<AbortController | null>(null);
const [isPlaying, setIsPlaying] = useState(false);
const [currentTime, setCurrentTime] = useState(0);
const [duration, setDuration] = useState(durationMs ? durationMs / 1000 : 0);
@@ -81,8 +82,12 @@ export function Audio({ id, src, title, durationMs, className }: AudioProps) {
// Handle download
const handleDownload = useCallback(async () => {
downloadControllerRef.current?.abort();
const controller = new AbortController();
downloadControllerRef.current = controller;
try {
const response = await fetch(src);
const response = await fetch(src, { signal: controller.signal });
const blob = await response.blob();
const url = window.URL.createObjectURL(blob);
const a = document.createElement("a");
@@ -93,10 +98,16 @@ export function Audio({ id, src, title, durationMs, className }: AudioProps) {
document.body.removeChild(a);
window.URL.revokeObjectURL(url);
} catch (err) {
if (err instanceof DOMException && err.name === "AbortError") return;
console.error("Error downloading audio:", err);
}
}, [src, title]);
// Abort in-flight download on unmount
useEffect(() => {
return () => downloadControllerRef.current?.abort();
}, []);
// Set up audio event listeners
useEffect(() => {
const audio = audioRef.current;

View file

@@ -80,11 +80,23 @@ function HeroCarouselCard({
useEffect(() => {
const video = videoRef.current;
if (video) {
setHasLoaded(false);
video.currentTime = 0;
video.play().catch(() => {});
}
if (!video) return;
setHasLoaded(false);
video.currentTime = 0;
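// Defer playback until the card is at least 10% visible, then stop observing.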
const observer = new IntersectionObserver(
([entry]) => {
if (entry.isIntersecting) {
video.play().catch(() => {});
observer.disconnect();
}
},
{ threshold: 0.1 }
);
observer.observe(video);
return () => observer.disconnect();
}, [src]);
const handleCanPlay = useCallback(() => {
@@ -94,7 +106,6 @@ function HeroCarouselCard({
return (
<>
<div className="overflow-hidden rounded-2xl border border-neutral-200/60 bg-white shadow-xl sm:rounded-3xl dark:border-neutral-700/60 dark:bg-neutral-900">
{" "}
<div className="flex items-center gap-3 border-b border-neutral-200/60 px-4 py-3 sm:px-6 sm:py-4 dark:border-neutral-700/60">
<div className="min-w-0">
<h3 className="truncate text-base font-semibold text-neutral-900 sm:text-xl dark:text-white">
@@ -108,7 +119,7 @@ function HeroCarouselCard({
<video
ref={videoRef}
src={src}
autoPlay
preload="none"
loop
muted
playsInline
@@ -129,65 +140,11 @@ function HeroCarouselCard({
);
}
function usePrefetchVideos() {
const videosRef = useRef<HTMLVideoElement[]>([]);
useEffect(() => {
let cancelled = false;
async function prefetch() {
for (const item of carouselItems) {
if (cancelled) break;
await new Promise<void>((resolve) => {
const video = document.createElement("video");
video.preload = "auto";
video.src = item.src;
video.oncanplaythrough = () => resolve();
video.onerror = () => resolve();
setTimeout(resolve, 10000);
videosRef.current.push(video);
});
}
}
prefetch();
return () => {
cancelled = true;
videosRef.current = [];
};
}, []);
}
const AUTOPLAY_MS = 6000;
function HeroCarousel() {
const [activeIndex, setActiveIndex] = useState(0);
const [isGifExpanded, setIsGifExpanded] = useState(false);
const [isHovered, setIsHovered] = useState(false);
const [isTabVisible, setIsTabVisible] = useState(true);
const directionRef = useRef<"forward" | "backward">("forward");
usePrefetchVideos();
const shouldAutoPlay = !isGifExpanded && !isHovered && isTabVisible;
useEffect(() => {
if (!shouldAutoPlay) return;
const id = setTimeout(() => {
directionRef.current = "forward";
setActiveIndex((prev) => (prev >= carouselItems.length - 1 ? 0 : prev + 1));
}, AUTOPLAY_MS);
return () => clearTimeout(id);
}, [activeIndex, shouldAutoPlay]);
useEffect(() => {
const handler = () => setIsTabVisible(!document.hidden);
document.addEventListener("visibilitychange", handler);
return () => document.removeEventListener("visibilitychange", handler);
}, []);
const goTo = useCallback(
(newIndex: number) => {
directionRef.current = newIndex >= activeIndex ? "forward" : "backward";
@@ -208,11 +165,7 @@ function HeroCarousel() {
const isForward = directionRef.current === "forward";
return (
<div
className="w-full py-4 sm:py-8"
onMouseEnter={() => setIsHovered(true)}
onMouseLeave={() => setIsHovered(false)}
>
<div className="w-full py-4 sm:py-8">
<div className="relative mx-auto w-full max-w-[900px]">
<AnimatePresence mode="wait" initial={false}>
<motion.div
@@ -232,53 +185,45 @@ function HeroCarousel() {
</AnimatePresence>
</div>
<div className="relative z-5 mt-6 flex items-center justify-center gap-4">
<button
type="button"
onClick={() => !isGifExpanded && goToPrev()}
className="flex size-9 items-center justify-center rounded-full border border-neutral-200 bg-white text-neutral-700 shadow-sm transition-colors hover:bg-neutral-100 dark:border-neutral-700 dark:bg-neutral-800 dark:text-neutral-200 dark:hover:bg-neutral-700"
aria-label="Previous slide"
>
<ChevronLeft className="size-5" />
</button>
<div className="relative z-5 mt-4 flex items-center justify-center gap-2">
<button
type="button"
onClick={() => !isGifExpanded && goToPrev()}
className="flex size-11 items-center justify-center rounded-full border border-neutral-200 bg-white text-neutral-700 shadow-sm transition-colors hover:bg-neutral-100 touch-manipulation dark:border-neutral-700 dark:bg-neutral-800 dark:text-neutral-200 dark:hover:bg-neutral-700"
aria-label="Previous slide"
>
<ChevronLeft className="size-5" />
</button>
<div className="flex items-center gap-2">
{carouselItems.map((_, i) => (
<button
key={`dot_${i}`}
type="button"
onClick={() => !isGifExpanded && goTo(i)}
className={`relative h-2 overflow-hidden rounded-full transition-all duration-300 ${
<div className="flex items-center">
{carouselItems.map((_, i) => (
<button
key={`dot_${i}`}
type="button"
onClick={() => !isGifExpanded && goTo(i)}
className="flex h-11 min-w-[28px] items-center justify-center touch-manipulation"
aria-label={`Go to slide ${i + 1}`}
>
<span
className={`block h-2.5 rounded-full transition-all duration-300 ${
i === activeIndex
? shouldAutoPlay
? "w-6 bg-neutral-300 dark:bg-neutral-600"
: "w-6 bg-neutral-900 dark:bg-white"
: "w-2 bg-neutral-300 hover:bg-neutral-400 dark:bg-neutral-600 dark:hover:bg-neutral-500"
? "w-6 bg-neutral-900 dark:bg-white"
: "w-2.5 bg-neutral-300 hover:bg-neutral-400 dark:bg-neutral-600 dark:hover:bg-neutral-500"
}`}
aria-label={`Go to slide ${i + 1}`}
>
{i === activeIndex && shouldAutoPlay && (
<motion.span
key={`progress_${activeIndex}`}
className="absolute inset-0 origin-left rounded-full bg-neutral-900 dark:bg-white"
initial={{ scaleX: 0 }}
animate={{ scaleX: 1 }}
transition={{ duration: AUTOPLAY_MS / 1000, ease: "linear" }}
/>
)}
</button>
))}
</div>
<button
type="button"
onClick={() => !isGifExpanded && goToNext()}
className="flex size-9 items-center justify-center rounded-full border border-neutral-200 bg-white text-neutral-700 shadow-sm transition-colors hover:bg-neutral-100 dark:border-neutral-700 dark:bg-neutral-800 dark:text-neutral-200 dark:hover:bg-neutral-700"
aria-label="Next slide"
>
<ChevronRight className="size-5" />
</button>
/>
</button>
))}
</div>
<button
type="button"
onClick={() => !isGifExpanded && goToNext()}
className="flex size-11 items-center justify-center rounded-full border border-neutral-200 bg-white text-neutral-700 shadow-sm transition-colors hover:bg-neutral-100 touch-manipulation dark:border-neutral-700 dark:bg-neutral-800 dark:text-neutral-200 dark:hover:bg-neutral-700"
aria-label="Next slide"
>
<ChevronRight className="size-5" />
</button>
</div>
</div>
);
}

View file

@@ -24,7 +24,7 @@ The following `.env` variables are **only used by the dev compose file** (they h
| `REDIS_PORT` | Exposed Redis port (internal-only in prod) | `6379` |
| `NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE` | Frontend build arg for auth type | `LOCAL` |
| `NEXT_PUBLIC_ETL_SERVICE` | Frontend build arg for ETL service | `DOCLING` |
| `NEXT_PUBLIC_ZERO_CACHE_URL` | Frontend build arg for Zero-cache URL | `http://localhost:4848` |
| `NEXT_PUBLIC_DEPLOYMENT_MODE` | Frontend build arg for deployment mode | `self-hosted` |
| `NEXT_PUBLIC_ELECTRIC_AUTH_MODE` | Frontend build arg for Electric auth | `insecure` |
In the production compose file, the `NEXT_PUBLIC_*` frontend variables are automatically derived from `AUTH_TYPE`, `ETL_SERVICE`, and the port settings. In the dev compose file, they are passed as build args since the frontend is built from source.

View file

@@ -18,8 +18,6 @@ After starting, access SurfSense at:
- **Frontend**: [http://localhost:3929](http://localhost:3929)
- **Backend API**: [http://localhost:8929](http://localhost:8929)
- **API Docs**: [http://localhost:8929/docs](http://localhost:8929/docs)
- **Electric SQL**: [http://localhost:5929](http://localhost:5929)
---
## Configuration
@@ -50,7 +48,7 @@ All configuration lives in a single `docker/.env` file (or `surfsense/.env` if y
|----------|-------------|---------|
| `FRONTEND_PORT` | Frontend service port | `3929` |
| `BACKEND_PORT` | Backend API service port | `8929` |
| `ELECTRIC_PORT` | Electric SQL service port | `5929` |
| `ZERO_CACHE_PORT` | Zero-cache real-time sync port | `5929` |
### Custom Domain / Reverse Proxy
@@ -61,7 +59,18 @@ Only set these if serving SurfSense on a real domain via a reverse proxy (Caddy,
| `NEXT_FRONTEND_URL` | Public frontend URL (e.g. `https://app.yourdomain.com`) |
| `BACKEND_URL` | Public backend URL for OAuth callbacks (e.g. `https://api.yourdomain.com`) |
| `NEXT_PUBLIC_FASTAPI_BACKEND_URL` | Backend URL used by the frontend (e.g. `https://api.yourdomain.com`) |
| `NEXT_PUBLIC_ELECTRIC_URL` | Electric SQL URL used by the frontend (e.g. `https://electric.yourdomain.com`) |
| `NEXT_PUBLIC_ZERO_CACHE_URL` | Zero-cache URL used by the frontend (e.g. `https://zero.yourdomain.com`) |
### Zero-cache (Real-Time Sync)
Defaults work out of the box. Change `ZERO_ADMIN_PASSWORD` for security in production.
| Variable | Description | Default |
|----------|-------------|---------|
| `ZERO_ADMIN_PASSWORD` | Password for the zero-cache admin UI and `/statz` endpoint | `surfsense-zero-admin` |
| `ZERO_UPSTREAM_DB` | PostgreSQL connection URL for replication (must be a direct connection, not via pgbouncer) | *(built from DB_* vars)* |
| `ZERO_CVR_DB` | PostgreSQL connection URL for client view records | *(built from DB_* vars)* |
| `ZERO_CHANGE_DB` | PostgreSQL connection URL for replication log entries | *(built from DB_* vars)* |
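
For a managed Postgres, all three URLs can point at the same external database. A hypothetical override (host and credentials are placeholders; the upstream connection must bypass PgBouncer):

```bash
# docker/.env — hypothetical managed-database override
ZERO_UPSTREAM_DB=postgresql://surfsense:secret@db.example.com:5432/surfsense
ZERO_CVR_DB=postgresql://surfsense:secret@db.example.com:5432/surfsense
ZERO_CHANGE_DB=postgresql://surfsense:secret@db.example.com:5432/surfsense
```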
### Database
@@ -77,14 +86,6 @@ Defaults work out of the box. Change for security in production.
| `DB_SSLMODE` | SSL mode: `disable`, `require`, `verify-ca`, `verify-full` | `disable` |
| `DATABASE_URL` | Full connection URL override. Use for managed databases (RDS, Supabase, etc.) | *(built from above)* |
### Electric SQL
| Variable | Description | Default |
|----------|-------------|---------|
| `ELECTRIC_DB_USER` | Replication user for Electric SQL | `electric` |
| `ELECTRIC_DB_PASSWORD` | Replication password for Electric SQL | `electric_password` |
| `ELECTRIC_DATABASE_URL` | Full connection URL override for Electric. Set to `host.docker.internal` when pointing at a local Postgres instance | *(built from above)* |
### Authentication
| Variable | Description |
@@ -148,7 +149,7 @@ Uncomment the connectors you want to use. Redirect URIs follow the pattern `http
| `backend` | FastAPI application server |
| `celery_worker` | Background task processing (document indexing, etc.) |
| `celery_beat` | Periodic task scheduler (connector sync) |
| `electric` | Electric SQL (real-time sync for the frontend) |
| `zero-cache` | Rocicorp Zero real-time sync (replicates Postgres to clients) |
| `frontend` | Next.js web application |
All services start automatically with `docker compose up -d`.
@@ -165,7 +166,6 @@ docker compose logs -f
# View logs for a specific service
docker compose logs -f backend
docker compose logs -f electric
# Stop all services
docker compose down
@@ -183,6 +183,6 @@ docker compose down -v
- **Ports already in use**: Change the relevant `*_PORT` variable in `.env` and restart.
- **Permission errors on Linux**: You may need to prefix `docker` commands with `sudo`.
- **Electric SQL not connecting**: Check `docker compose logs electric`. If it shows `domain does not exist: db`, ensure `ELECTRIC_DATABASE_URL` is not set to a stale value in `.env`.
- **Real-time updates not working in browser**: Open DevTools → Console and look for `[Electric]` errors. Check that `NEXT_PUBLIC_ELECTRIC_URL` matches the running Electric SQL address.
- **Zero-cache not starting**: Check `docker compose logs zero-cache`. Ensure PostgreSQL has `wal_level=logical` (configured automatically by the bundled `postgresql.conf`).
- **Real-time updates not working**: Open DevTools → Console and check for WebSocket errors. Verify `NEXT_PUBLIC_ZERO_CACHE_URL` matches the running zero-cache address.
- **Line ending issues on Windows**: Run `git config --global core.autocrlf true` before cloning.

View file

@@ -38,4 +38,4 @@ After starting, access SurfSense at:
- **Frontend**: [http://localhost:3929](http://localhost:3929)
- **Backend API**: [http://localhost:8929](http://localhost:8929)
- **API Docs**: [http://localhost:8929/docs](http://localhost:8929/docs)
- **Electric SQL**: [http://localhost:5929](http://localhost:5929)
- **Zero-cache**: [http://localhost:5929](http://localhost:5929)

View file

@@ -1,226 +0,0 @@
---
title: Electric SQL
description: Setting up Electric SQL for real-time data synchronization in SurfSense
---
[Electric SQL](https://electric-sql.com/) enables real-time data synchronization in SurfSense, providing instant updates for inbox items, document indexing status, and connector sync progress without manual refresh. The frontend uses [PGlite](https://pglite.dev/) (a lightweight PostgreSQL in the browser) to maintain a local database that syncs with the backend via Electric SQL.
## What does Electric SQL do?
When you index documents or receive inbox updates, Electric SQL pushes updates to your browser in real-time. The data flows like this:
1. Backend writes data to PostgreSQL
2. Electric SQL detects changes and streams them to the frontend
3. PGlite (running in your browser) receives and stores the data locally in IndexedDB
4. Your UI updates instantly without refreshing
This means:
- **Inbox updates appear instantly** - No need to refresh the page
- **Document indexing progress updates live** - Watch your documents get processed
- **Connector status syncs automatically** - See when connectors finish syncing
- **Offline support** - PGlite caches data locally, so previously loaded data remains accessible
## Docker Setup
- The `docker-compose.yml` includes the Electric SQL service, pre-configured to connect to the Docker-managed `db` container.
- No additional configuration is required. Electric SQL works with the Docker PostgreSQL instance out of the box.
## Manual Setup (Development Only)
This section is intended for local development environments. Follow the steps below based on your PostgreSQL setup.
### Step 1: Configure Environment Variables
Ensure your environment files are configured. If you haven't set up SurfSense yet, follow the [Manual Installation Guide](/docs/manual-installation) first.
For Electric SQL, verify these variables are set:
**Backend (`surfsense_backend/.env`):**
```bash
ELECTRIC_DB_USER=electric
ELECTRIC_DB_PASSWORD=electric_password
```
**Frontend (`surfsense_web/.env`):**
```bash
NEXT_PUBLIC_ELECTRIC_URL=http://localhost:5133
NEXT_PUBLIC_ELECTRIC_AUTH_MODE=insecure
```
Next, choose the option that matches your PostgreSQL setup:
---
### Option A: Using Docker PostgreSQL
If you're using the Docker-managed PostgreSQL instance, no extra configuration is needed. Just start the services using the development compose file (which exposes the PostgreSQL port to your host machine):
```bash
docker compose -f docker-compose.dev.yml up -d db electric
```
Then run the database migration, start the backend, and launch the frontend:
```bash
cd surfsense_backend
uv run alembic upgrade head
uv run main.py
```
In a separate terminal, start the frontend:
```bash
cd surfsense_web
pnpm run dev
```
Electric SQL is now configured and connected to your Docker PostgreSQL database.
---
### Option B: Using Local PostgreSQL
If you're using a local PostgreSQL installation (e.g. Postgres.app on macOS), follow these steps:
**1. Enable logical replication in PostgreSQL:**
Open your `postgresql.conf` file:
```bash
# Common locations:
# macOS (Postgres.app): ~/Library/Application Support/Postgres/var-17/postgresql.conf
# macOS (Homebrew): /opt/homebrew/var/postgresql@17/postgresql.conf
# Linux: /etc/postgresql/17/main/postgresql.conf
sudo vim /path/to/postgresql.conf
```
Add the following settings:
```ini
# Required for Electric SQL
wal_level = logical
max_replication_slots = 10
max_wal_senders = 10
```
After saving, restart PostgreSQL for the settings to take effect.
**2. Create the Electric replication user:**
Connect to your local database as a superuser and run:
```sql
CREATE USER electric WITH REPLICATION PASSWORD 'electric_password';
GRANT CONNECT ON DATABASE surfsense TO electric;
GRANT CREATE ON DATABASE surfsense TO electric;
GRANT USAGE ON SCHEMA public TO electric;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO electric;
GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO electric;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO electric;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON SEQUENCES TO electric;
CREATE PUBLICATION electric_publication_default;
```
**3. Set `ELECTRIC_DATABASE_URL` in `docker/.env`:**
Uncomment and update this line to point Electric at your local Postgres via `host.docker.internal` (the hostname Docker containers use to reach the host machine):
```bash
ELECTRIC_DATABASE_URL=postgresql://electric:electric_password@host.docker.internal:5432/surfsense?sslmode=disable
```
**4. Start Electric SQL only (skip the Docker `db` container):**
```bash
docker compose -f docker-compose.dev.yml up -d --no-deps electric
```
The `--no-deps` flag starts only the `electric` service without starting the Docker-managed `db` container.
**5. Run database migration and start the backend:**
```bash
cd surfsense_backend
uv run alembic upgrade head
uv run main.py
```
In a separate terminal, start the frontend:
```bash
cd surfsense_web
pnpm run dev
```
Electric SQL is now configured and connected to your local PostgreSQL database.
## Environment Variables Reference
**Required for manual setup:**
| Variable | Location | Description | Default |
|----------|----------|-------------|---------|
| `ELECTRIC_DB_USER` | `surfsense_backend/.env` | Database user for Electric replication | `electric` |
| `ELECTRIC_DB_PASSWORD` | `surfsense_backend/.env` | Database password for Electric replication | `electric_password` |
| `NEXT_PUBLIC_ELECTRIC_URL` | `surfsense_web/.env` | Electric SQL server URL (PGlite connects to this) | `http://localhost:5133` |
| `NEXT_PUBLIC_ELECTRIC_AUTH_MODE` | `surfsense_web/.env` | Authentication mode (`insecure` for dev, `secure` for production) | `insecure` |
**Optional / Docker-only:**
| Variable | Location | Description | Default |
|----------|----------|-------------|---------|
| `ELECTRIC_PORT` | `docker/.env` | Port to expose Electric SQL on the host | `5133` (dev), `5929` (production) |
| `ELECTRIC_DATABASE_URL` | `docker/.env` | Full connection URL override for Electric. Only needed for Option B (local Postgres via `host.docker.internal`) | *(built from above defaults)* |
## Verify Setup
To verify Electric SQL is running correctly:
```bash
curl http://localhost:5133/v1/health
```
You should receive:
```json
{"status":"active"}
```
## Troubleshooting
### Electric SQL Server Not Starting
**Check PostgreSQL settings:**
- Ensure `wal_level = logical` is set
- Verify the Electric user has replication permissions
- Check database connectivity from Electric container
### Real-time Updates Not Working
1. Open browser DevTools → Console
2. Look for errors containing `[Electric]`
3. Check Network tab for WebSocket connections to the Electric URL
### Connection Refused Errors
- Verify Electric SQL server is running: `docker ps | grep electric`
- Check the `NEXT_PUBLIC_ELECTRIC_URL` matches your Electric server address
- For Docker setups, ensure the frontend can reach the Electric container
### Data Not Syncing
- Check Electric SQL logs: `docker compose logs electric`
- Verify PostgreSQL replication is working
- Ensure the Electric user has proper table permissions
### PGlite/IndexedDB Issues
If data appears stale or corrupted in the browser:
1. Open browser DevTools → Application → IndexedDB
2. Delete databases starting with `surfsense-`
3. Refresh the page - PGlite will recreate the local database and resync

View file

@@ -9,9 +9,9 @@ Practical guides to help you get the most out of SurfSense.
<Cards>
<Card
title="Electric SQL"
description="Setting up Electric SQL for real-time data synchronization"
href="/docs/how-to/electric-sql"
title="Real-Time Sync with Zero"
description="How SurfSense uses Rocicorp Zero for instant real-time data synchronization"
href="/docs/how-to/zero-sync"
/>
<Card
title="Realtime Collaboration"

View file

@@ -1,6 +1,6 @@
{
"title": "How to",
"pages": ["electric-sql", "realtime-collaboration", "web-search"],
"pages": ["zero-sync", "realtime-collaboration", "web-search"],
"icon": "Compass",
"defaultOpen": false
}

View file

@@ -0,0 +1,92 @@
---
title: Real-Time Sync with Zero
description: How SurfSense uses Rocicorp Zero for instant real-time data synchronization
---
# Real-Time Sync with Zero
SurfSense uses [Rocicorp Zero](https://zero.rocicorp.dev/) for real-time data synchronization. Zero continuously replicates data from PostgreSQL to a local cache on each client, enabling instant UI updates for notifications, documents, connectors, chat messages, and comments.
## How It Works
Zero runs a **zero-cache** server that sits between PostgreSQL and the browser:
1. **zero-cache** replicates data from PostgreSQL into a local SQLite replica using logical replication
2. The browser connects to zero-cache via WebSocket and syncs relevant data locally
3. When data changes in PostgreSQL (e.g., a new notification), zero-cache pushes the update to all connected clients instantly
4. Queries run against local data first for instant results, then update when server data arrives
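
In component code this surfaces as a live query hook. A minimal sketch based on the hooks in this release (the `CommentCount` component is illustrative; `queries.comments.byThread` is one of the synced queries SurfSense defines):

```tsx
"use client";

import { useQuery } from "@rocicorp/zero/react";
import { queries } from "@/zero/queries";

// Illustrative component: renders instantly from the local cache and
// re-renders automatically when zero-cache pushes new rows.
export function CommentCount({ threadId }: { threadId: number }) {
  const [comments] = useQuery(queries.comments.byThread({ threadId }));
  return <span>{comments?.length ?? 0} comments</span>;
}
```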
## Architecture
| Component | Role |
|-----------|------|
| **PostgreSQL** | Source of truth (with `wal_level=logical`) |
| **zero-cache** | Replicates Postgres → SQLite, serves client sync via WebSocket |
| **Browser** | Stores synced data locally, runs queries against local cache |
## Configuration
### Docker Deployment
zero-cache is included in the Docker Compose setup. The key environment variables are:
| Variable | Description | Default |
|----------|-------------|---------|
| `ZERO_CACHE_PORT` | Port for the zero-cache service | `5929` (prod) / `4848` (dev) |
| `ZERO_ADMIN_PASSWORD` | Password for the zero-cache admin UI and `/statz` endpoint | `surfsense-zero-admin` |
| `ZERO_UPSTREAM_DB` | PostgreSQL connection URL for replication | Built from `DB_*` vars |
| `NEXT_PUBLIC_ZERO_CACHE_URL` | URL the frontend uses to connect to zero-cache | `http://localhost:<ZERO_CACHE_PORT>` |
### Manual / Local Development
If running the frontend outside Docker (e.g., `pnpm dev`), you need:
1. A running zero-cache instance pointing at your PostgreSQL database
2. `NEXT_PUBLIC_ZERO_CACHE_URL` set in your `.env` file (default: `http://localhost:4848`)
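
One way to get there locally is to reuse the dev compose file and start only the database and sync services (service names assumed to match this repo's compose files):

```bash
# Start Postgres and zero-cache from the dev compose file,
# then run the frontend natively with `pnpm dev`.
docker compose -f docker-compose.dev.yml up -d db zero-cache
```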
### Custom Domain / Reverse Proxy
When deploying behind a reverse proxy, set `NEXT_PUBLIC_ZERO_CACHE_URL` to your public zero-cache URL (e.g., `https://zero.yourdomain.com`). The zero-cache service must be accessible via WebSocket from the browser.
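
A quick, hedged way to check that the proxy forwards upgrade headers (the exact sync path zero-cache expects may differ; the point is to get a WebSocket handshake or an application-level error rather than a proxy 502):

```bash
# Hypothetical handshake probe against the public zero-cache URL
curl -i https://zero.yourdomain.com \
  -H "Connection: Upgrade" -H "Upgrade: websocket" \
  -H "Sec-WebSocket-Version: 13" -H "Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw=="
```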
### Database Requirements
zero-cache connects to PostgreSQL using logical replication. The database must meet these requirements:
1. **`wal_level = logical`** — already configured in the bundled `postgresql.conf`
2. **The database user must have `REPLICATION` privilege** — required for creating logical replication slots
In the default Docker setup, the `surfsense` user is a PostgreSQL superuser and has all required privileges automatically.
**For managed databases** (RDS, Supabase, Cloud SQL, etc.) where the app user may not be a superuser, you need to grant replication privileges:
```sql
ALTER USER surfsense WITH REPLICATION;
GRANT CREATE ON DATABASE surfsense TO surfsense;
```
The `REPLICATION` privilege allows zero-cache to create a logical replication slot for streaming changes. The `CREATE` privilege allows zero-cache to create internal schemas (`zero`, `zero_0`) for its metadata.
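
To confirm the grant took effect (a standard Postgres catalog query, nothing SurfSense-specific):

```bash
# Should report rolreplication = t for the app user
psql "$DATABASE_URL" -c \
  "SELECT rolname, rolreplication FROM pg_roles WHERE rolname = 'surfsense';"
```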
## Synced Tables
Zero syncs the following tables for real-time features:
| Table | Used By |
|-------|---------|
| `notifications` | Inbox (comments, document processing, connector status) |
| `documents` | Document list, processing status indicators |
| `search_source_connectors` | Connector status, indexing progress |
| `new_chat_messages` | Live chat message sync for shared chats |
| `chat_comments` | Real-time comment threads on AI responses |
| `chat_session_state` | Collaboration indicators (who is typing) |
## Troubleshooting
- **zero-cache not starting**: Check `docker compose logs zero-cache`. Ensure PostgreSQL has `wal_level=logical` (configured in `postgresql.conf`).
- **Frontend not syncing**: Open DevTools → Console and check for WebSocket connection errors. Verify `NEXT_PUBLIC_ZERO_CACHE_URL` matches the running zero-cache address.
- **Stale data after restart**: zero-cache rebuilds its SQLite replica from PostgreSQL on startup. This may take a moment for large databases.
## Learn More
- [Rocicorp Zero Documentation](https://zero.rocicorp.dev/docs)
- [Zero Schema Reference](https://zero.rocicorp.dev/docs/schema)
- [Zero Deployment Guide](https://zero.rocicorp.dev/docs/deployment)

View file

@@ -73,8 +73,6 @@ Edit the `.env` file and set the following variables:
| AUTH_TYPE | Authentication method: `GOOGLE` for OAuth with Google, `LOCAL` for email/password authentication |
| GOOGLE_OAUTH_CLIENT_ID | (Optional) Client ID from Google Cloud Console (required if AUTH_TYPE=GOOGLE) |
| GOOGLE_OAUTH_CLIENT_SECRET | (Optional) Client secret from Google Cloud Console (required if AUTH_TYPE=GOOGLE) |
| ELECTRIC_DB_USER | (Optional) PostgreSQL username for Electric-SQL connection (default: `electric`) |
| ELECTRIC_DB_PASSWORD | (Optional) PostgreSQL password for Electric-SQL connection (default: `electric_password`) |
| EMBEDDING_MODEL | Name of the embedding model (e.g., `sentence-transformers/all-MiniLM-L6-v2`, `openai://text-embedding-ada-002`) |
| RERANKERS_ENABLED | (Optional) Enable or disable document reranking for improved search results (e.g., `TRUE` or `FALSE`, default: `FALSE`) |
| RERANKERS_MODEL_NAME | Name of the reranker model (e.g., `ms-marco-MiniLM-L-12-v2`) (required if RERANKERS_ENABLED=TRUE) |
@@ -410,8 +408,7 @@ Edit the `.env` file and set:
| NEXT_PUBLIC_FASTAPI_BACKEND_URL | Backend URL (e.g., `http://localhost:8000`) |
| NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE | Same value as set in backend AUTH_TYPE, i.e. `GOOGLE` for OAuth with Google or `LOCAL` for email/password authentication |
| NEXT_PUBLIC_ETL_SERVICE | Document parsing service (should match backend ETL_SERVICE): `UNSTRUCTURED`, `LLAMACLOUD`, or `DOCLING` - affects supported file formats in upload interface |
| NEXT_PUBLIC_ELECTRIC_URL | URL for Electric-SQL service (e.g., `http://localhost:5133`) |
| NEXT_PUBLIC_ELECTRIC_AUTH_MODE | Electric-SQL authentication mode (default: `insecure`) |
| NEXT_PUBLIC_ZERO_CACHE_URL | URL for Zero-cache real-time sync service (e.g., `http://localhost:4848`) |
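
Put together, a minimal `surfsense_web/.env` for local development might look like this (values are examples):

```bash
NEXT_PUBLIC_FASTAPI_BACKEND_URL=http://localhost:8000
NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=LOCAL
NEXT_PUBLIC_ETL_SERVICE=DOCLING
NEXT_PUBLIC_ZERO_CACHE_URL=http://localhost:4848
```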
### 2. Install Dependencies

View file

@@ -6,7 +6,7 @@ import { z } from "zod";
export const rawComment = z.object({
id: z.number(),
message_id: z.number(),
thread_id: z.number(), // Denormalized for efficient Electric subscriptions
thread_id: z.number(), // Denormalized for efficient per-thread sync
parent_id: z.number().nullable(),
author_id: z.string().nullable(),
content: z.string(),

View file

@@ -1,7 +1,7 @@
import { z } from "zod";
/**
* Raw message from database (Electric SQL sync)
* Raw message from database (real-time sync)
*/
export const rawMessage = z.object({
id: z.number(),

View file

@@ -17,14 +17,16 @@ const replacements = [
"__NEXT_PUBLIC_FASTAPI_BACKEND_URL__",
process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL || "http://localhost:8000",
],
["__NEXT_PUBLIC_ELECTRIC_URL__", process.env.NEXT_PUBLIC_ELECTRIC_URL || "http://localhost:5133"],
[
"__NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE__",
process.env.NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE || "LOCAL",
],
["__NEXT_PUBLIC_ETL_SERVICE__", process.env.NEXT_PUBLIC_ETL_SERVICE || "DOCLING"],
[
"__NEXT_PUBLIC_ZERO_CACHE_URL__",
process.env.NEXT_PUBLIC_ZERO_CACHE_URL || "http://localhost:4848",
],
["__NEXT_PUBLIC_DEPLOYMENT_MODE__", process.env.NEXT_PUBLIC_DEPLOYMENT_MODE || "self-hosted"],
["__NEXT_PUBLIC_ELECTRIC_AUTH_MODE__", process.env.NEXT_PUBLIC_ELECTRIC_AUTH_MODE || "insecure"],
];
let filesProcessed = 0;

View file

@@ -1,27 +1,19 @@
"use client";
import { useShape } from "@electric-sql/react";
import { useQuery } from "@rocicorp/zero/react";
import { useSetAtom } from "jotai";
import { useEffect } from "react";
import { chatSessionStateAtom } from "@/atoms/chat/chat-session-state.atom";
import type { ChatSessionState } from "@/contracts/types/chat-session-state.types";
const ELECTRIC_URL = process.env.NEXT_PUBLIC_ELECTRIC_URL || "http://localhost:5133";
import { queries } from "@/zero/queries";
/**
* Syncs chat session state for a thread via Electric SQL.
* Syncs chat session state for a thread via Zero.
* Call once per thread (in page.tsx). Updates global atom.
*/
export function useChatSessionStateSync(threadId: number | null) {
const setSessionState = useSetAtom(chatSessionStateAtom);
const { data } = useShape<ChatSessionState>({
url: `${ELECTRIC_URL}/v1/shape`,
params: {
table: "chat_session_state",
where: `thread_id = ${threadId ?? -1}`,
},
});
const [row] = useQuery(queries.chatSession.byThread({ threadId: threadId ?? -1 }));
useEffect(() => {
if (!threadId) {
@@ -29,11 +21,10 @@ export function useChatSessionStateSync(threadId: number | null) {
return;
}
const row = data?.[0];
setSessionState({
threadId,
isAiResponding: !!row?.ai_responding_to_user_id,
respondingToUserId: row?.ai_responding_to_user_id ?? null,
isAiResponding: !!row?.aiRespondingToUserId,
respondingToUserId: row?.aiRespondingToUserId ?? null,
});
}, [threadId, data, setSessionState]);
}, [threadId, row, setSessionState]);
}

View file

@@ -1,413 +0,0 @@
"use client";
import { useQueryClient } from "@tanstack/react-query";
import { useAtomValue } from "jotai";
import { useCallback, useEffect, useMemo, useRef } from "react";
import { membersAtom, myAccessAtom } from "@/atoms/members/members-query.atoms";
import { currentUserAtom } from "@/atoms/user/user-query.atoms";
import type { Author, Comment, CommentReply } from "@/contracts/types/chat-comments.types";
import type { Membership } from "@/contracts/types/members.types";
import type { SyncHandle } from "@/lib/electric/client";
import { useElectricClient } from "@/lib/electric/context";
import { cacheKeys } from "@/lib/query-client/cache-keys";
// Debounce delay for stream updates (ms)
const STREAM_UPDATE_DEBOUNCE_MS = 100;
// Raw comment from PGlite local database
interface RawCommentRow {
id: number;
message_id: number;
thread_id: number;
parent_id: number | null;
author_id: string | null;
content: string;
created_at: string;
updated_at: string;
}
// Regex pattern to match @[uuid] mentions (matches backend MENTION_PATTERN)
const MENTION_PATTERN = /@\[([0-9a-fA-F-]{36})\]/g;
type MemberInfo = Pick<Membership, "user_display_name" | "user_avatar_url" | "user_email">;
/**
* Render mentions in content by replacing @[uuid] with @{DisplayName}
*/
function renderMentions(content: string, memberMap: Map<string, MemberInfo>): string {
return content.replace(MENTION_PATTERN, (match, uuid) => {
const member = memberMap.get(uuid);
if (member?.user_display_name) {
return `@{${member.user_display_name}}`;
}
return match;
});
}
/**
* Build member lookup map from membersData
*/
function buildMemberMap(membersData: Membership[] | undefined): Map<string, MemberInfo> {
const map = new Map<string, MemberInfo>();
if (membersData) {
for (const m of membersData) {
map.set(m.user_id, {
user_display_name: m.user_display_name,
user_avatar_url: m.user_avatar_url,
user_email: m.user_email,
});
}
}
return map;
}
/**
* Build author object from member data
*/
function buildAuthor(authorId: string | null, memberMap: Map<string, MemberInfo>): Author | null {
if (!authorId) return null;
const m = memberMap.get(authorId);
if (!m) return null;
return {
id: authorId,
display_name: m.user_display_name ?? null,
avatar_url: m.user_avatar_url ?? null,
email: m.user_email ?? "",
};
}
/**
* Check if a comment has been edited by comparing timestamps.
* Uses a small threshold to handle precision differences.
*/
function isEdited(createdAt: string, updatedAt: string): boolean {
const created = new Date(createdAt).getTime();
const updated = new Date(updatedAt).getTime();
// Consider edited if updated_at is more than 1 second after created_at
return updated - created > 1000;
}
/**
* Transform raw comment to CommentReply
*/
function transformReply(
raw: RawCommentRow,
memberMap: Map<string, MemberInfo>,
currentUserId: string | undefined,
isOwner: boolean
): CommentReply {
return {
id: raw.id,
content: raw.content,
content_rendered: renderMentions(raw.content, memberMap),
author: buildAuthor(raw.author_id, memberMap),
created_at: raw.created_at,
updated_at: raw.updated_at,
is_edited: isEdited(raw.created_at, raw.updated_at),
can_edit: currentUserId === raw.author_id,
can_delete: currentUserId === raw.author_id || isOwner,
};
}
/**
* Transform raw comments to Comment with replies
*/
function transformComments(
rawComments: RawCommentRow[],
memberMap: Map<string, MemberInfo>,
currentUserId: string | undefined,
isOwner: boolean
): Map<number, Comment[]> {
// Group comments by message_id
const byMessage = new Map<
number,
{ topLevel: RawCommentRow[]; replies: Map<number, RawCommentRow[]> }
>();
for (const raw of rawComments) {
if (!byMessage.has(raw.message_id)) {
byMessage.set(raw.message_id, { topLevel: [], replies: new Map() });
}
const group = byMessage.get(raw.message_id)!;
if (raw.parent_id === null) {
group.topLevel.push(raw);
} else {
if (!group.replies.has(raw.parent_id)) {
group.replies.set(raw.parent_id, []);
}
group.replies.get(raw.parent_id)!.push(raw);
}
}
// Transform to Comment objects grouped by message_id
const result = new Map<number, Comment[]>();
for (const [messageId, group] of byMessage) {
const comments: Comment[] = group.topLevel.map((raw) => {
const replies = (group.replies.get(raw.id) || [])
.sort((a, b) => new Date(a.created_at).getTime() - new Date(b.created_at).getTime())
.map((r) => transformReply(r, memberMap, currentUserId, isOwner));
return {
id: raw.id,
message_id: raw.message_id,
content: raw.content,
content_rendered: renderMentions(raw.content, memberMap),
author: buildAuthor(raw.author_id, memberMap),
created_at: raw.created_at,
updated_at: raw.updated_at,
is_edited: isEdited(raw.created_at, raw.updated_at),
can_edit: currentUserId === raw.author_id,
can_delete: currentUserId === raw.author_id || isOwner,
reply_count: replies.length,
replies,
};
});
// Sort by created_at
comments.sort((a, b) => new Date(a.created_at).getTime() - new Date(b.created_at).getTime());
result.set(messageId, comments);
}
return result;
}
/**
* Hook for syncing comments with Electric SQL real-time sync.
*
* Syncs ALL comments for a thread in ONE subscription, then updates
* React Query cache for each message. This avoids N subscriptions for N messages.
*
* @param threadId - The thread ID to sync comments for
*/
export function useCommentsElectric(threadId: number | null) {
const electricClient = useElectricClient();
const queryClient = useQueryClient();
const { data: membersData } = useAtomValue(membersAtom);
const { data: currentUser } = useAtomValue(currentUserAtom);
const { data: myAccess } = useAtomValue(myAccessAtom);
const memberMap = useMemo(() => buildMemberMap(membersData), [membersData]);
const currentUserId = currentUser?.id;
const isOwner = myAccess?.is_owner ?? false;
// Use refs for values needed in live query callback to avoid stale closures
const memberMapRef = useRef(memberMap);
const currentUserIdRef = useRef(currentUserId);
const isOwnerRef = useRef(isOwner);
const queryClientRef = useRef(queryClient);
// Keep refs updated
useEffect(() => {
memberMapRef.current = memberMap;
currentUserIdRef.current = currentUserId;
isOwnerRef.current = isOwner;
queryClientRef.current = queryClient;
}, [memberMap, currentUserId, isOwner, queryClient]);
const syncHandleRef = useRef<SyncHandle | null>(null);
const liveQueryRef = useRef<{ unsubscribe: () => void } | null>(null);
const syncKeyRef = useRef<string | null>(null);
const streamUpdateDebounceRef = useRef<ReturnType<typeof setTimeout> | null>(null);
// Stable callback that uses refs for fresh values
const updateReactQueryCache = useCallback((rows: RawCommentRow[]) => {
const commentsByMessage = transformComments(
rows,
memberMapRef.current,
currentUserIdRef.current,
isOwnerRef.current
);
for (const [messageId, comments] of commentsByMessage) {
const cacheKey = cacheKeys.comments.byMessage(messageId);
queryClientRef.current.setQueryData(cacheKey, {
comments,
total_count: comments.length,
});
}
}, []);
useEffect(() => {
if (!threadId || !electricClient) {
return;
}
const syncKey = `comments_${threadId}`;
if (syncKeyRef.current === syncKey) {
return;
}
// Capture in local variable for use in async functions
const client = electricClient;
let mounted = true;
syncKeyRef.current = syncKey;
async function startSync() {
try {
const handle = await client.syncShape({
table: "chat_comments",
where: `thread_id = ${threadId}`,
columns: [
"id",
"message_id",
"thread_id",
"parent_id",
"author_id",
"content",
"created_at",
"updated_at",
],
primaryKey: ["id"],
});
if (!handle.isUpToDate && handle.initialSyncPromise) {
try {
await Promise.race([
handle.initialSyncPromise,
new Promise((resolve) => setTimeout(resolve, 3000)),
]);
} catch {
// Initial sync timeout - continue anyway
}
}
if (!mounted) {
handle.unsubscribe();
return;
}
syncHandleRef.current = handle;
// Fetch initial comments and update cache
await fetchAndUpdateCache();
// Set up live query for real-time updates
await setupLiveQuery();
// Subscribe to the sync stream for real-time updates from Electric SQL
// This ensures we catch updates even if PGlite live query misses them
if (handle.stream) {
const stream = handle.stream as {
subscribe?: (callback: (messages: unknown[]) => void) => void;
};
if (typeof stream.subscribe === "function") {
stream.subscribe((messages: unknown[]) => {
if (!mounted) return;
// When Electric sync receives new data, refresh from PGlite
// This handles cases where live query might miss the update
if (messages && messages.length > 0) {
// Debounce the refresh to avoid excessive queries
if (streamUpdateDebounceRef.current) {
clearTimeout(streamUpdateDebounceRef.current);
}
streamUpdateDebounceRef.current = setTimeout(() => {
if (mounted) {
fetchAndUpdateCache();
}
}, STREAM_UPDATE_DEBOUNCE_MS);
}
});
}
}
} catch {
// Sync failed - will retry on next mount
}
}
async function fetchAndUpdateCache() {
try {
const result = await client.db.query<RawCommentRow>(
`SELECT id, message_id, thread_id, parent_id, author_id, content, created_at, updated_at
FROM chat_comments
WHERE thread_id = $1
ORDER BY created_at ASC`,
[threadId]
);
if (mounted && result.rows) {
updateReactQueryCache(result.rows);
}
} catch {
// Query failed - data will be fetched from API
}
}
async function setupLiveQuery() {
try {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const db = client.db as any;
if (db.live?.query && typeof db.live.query === "function") {
const liveQuery = await db.live.query(
`SELECT id, message_id, thread_id, parent_id, author_id, content, created_at, updated_at
FROM chat_comments
WHERE thread_id = $1
ORDER BY created_at ASC`,
[threadId]
);
if (!mounted) {
liveQuery.unsubscribe?.();
return;
}
// Set initial results
if (liveQuery.initialResults?.rows) {
updateReactQueryCache(liveQuery.initialResults.rows);
} else if (liveQuery.rows) {
updateReactQueryCache(liveQuery.rows);
}
// Subscribe to changes
if (typeof liveQuery.subscribe === "function") {
liveQuery.subscribe((result: { rows: RawCommentRow[] }) => {
if (mounted && result.rows) {
updateReactQueryCache(result.rows);
}
});
}
if (typeof liveQuery.unsubscribe === "function") {
liveQueryRef.current = liveQuery;
}
}
} catch {
// Live query setup failed - will use initial fetch only
}
}
startSync();
return () => {
mounted = false;
syncKeyRef.current = null;
// Clear debounce timeout
if (streamUpdateDebounceRef.current) {
clearTimeout(streamUpdateDebounceRef.current);
streamUpdateDebounceRef.current = null;
}
if (syncHandleRef.current) {
try {
syncHandleRef.current.unsubscribe();
} catch {
// PGlite may already be closed during cleanup
}
syncHandleRef.current = null;
}
if (liveQueryRef.current) {
try {
liveQueryRef.current.unsubscribe();
} catch {
// PGlite may already be closed during cleanup
}
liveQueryRef.current = null;
}
};
}, [threadId, electricClient, updateReactQueryCache]);
}

View file

@@ -0,0 +1,212 @@
"use client";
import { useQuery } from "@rocicorp/zero/react";
import { useQueryClient } from "@tanstack/react-query";
import { useAtomValue } from "jotai";
import { useCallback, useEffect, useMemo, useRef } from "react";
import { membersAtom, myAccessAtom } from "@/atoms/members/members-query.atoms";
import { currentUserAtom } from "@/atoms/user/user-query.atoms";
import type { Author, Comment, CommentReply } from "@/contracts/types/chat-comments.types";
import type { Membership } from "@/contracts/types/members.types";
import { cacheKeys } from "@/lib/query-client/cache-keys";
import { queries } from "@/zero/queries";
interface RawCommentRow {
id: number;
message_id: number;
thread_id: number;
parent_id: number | null;
author_id: string | null;
content: string;
created_at: string;
updated_at: string;
}
const MENTION_PATTERN = /@\[([0-9a-fA-F-]{36})\]/g;
type MemberInfo = Pick<Membership, "user_display_name" | "user_avatar_url" | "user_email">;
function renderMentions(content: string, memberMap: Map<string, MemberInfo>): string {
return content.replace(MENTION_PATTERN, (match, uuid) => {
const member = memberMap.get(uuid);
if (member?.user_display_name) {
return `@{${member.user_display_name}}`;
}
return match;
});
}
function buildMemberMap(membersData: Membership[] | undefined): Map<string, MemberInfo> {
const map = new Map<string, MemberInfo>();
if (membersData) {
for (const m of membersData) {
map.set(m.user_id, {
user_display_name: m.user_display_name,
user_avatar_url: m.user_avatar_url,
user_email: m.user_email,
});
}
}
return map;
}
function buildAuthor(authorId: string | null, memberMap: Map<string, MemberInfo>): Author | null {
if (!authorId) return null;
const m = memberMap.get(authorId);
if (!m) return null;
return {
id: authorId,
display_name: m.user_display_name ?? null,
avatar_url: m.user_avatar_url ?? null,
email: m.user_email ?? "",
};
}
function isEdited(createdAt: string, updatedAt: string): boolean {
const created = new Date(createdAt).getTime();
const updated = new Date(updatedAt).getTime();
return updated - created > 1000;
}
function transformReply(
raw: RawCommentRow,
memberMap: Map<string, MemberInfo>,
currentUserId: string | undefined,
isOwner: boolean
): CommentReply {
return {
id: raw.id,
content: raw.content,
content_rendered: renderMentions(raw.content, memberMap),
author: buildAuthor(raw.author_id, memberMap),
created_at: raw.created_at,
updated_at: raw.updated_at,
is_edited: isEdited(raw.created_at, raw.updated_at),
can_edit: currentUserId === raw.author_id,
can_delete: currentUserId === raw.author_id || isOwner,
};
}
function transformComments(
rawComments: RawCommentRow[],
memberMap: Map<string, MemberInfo>,
currentUserId: string | undefined,
isOwner: boolean
): Map<number, Comment[]> {
const byMessage = new Map<
number,
{ topLevel: RawCommentRow[]; replies: Map<number, RawCommentRow[]> }
>();
for (const raw of rawComments) {
if (!byMessage.has(raw.message_id)) {
byMessage.set(raw.message_id, { topLevel: [], replies: new Map() });
}
const group = byMessage.get(raw.message_id)!;
if (raw.parent_id === null) {
group.topLevel.push(raw);
} else {
if (!group.replies.has(raw.parent_id)) {
group.replies.set(raw.parent_id, []);
}
group.replies.get(raw.parent_id)!.push(raw);
}
}
const result = new Map<number, Comment[]>();
for (const [messageId, group] of byMessage) {
const comments: Comment[] = group.topLevel.map((raw) => {
const replies = (group.replies.get(raw.id) || [])
.sort((a, b) => new Date(a.created_at).getTime() - new Date(b.created_at).getTime())
.map((r) => transformReply(r, memberMap, currentUserId, isOwner));
return {
id: raw.id,
message_id: raw.message_id,
content: raw.content,
content_rendered: renderMentions(raw.content, memberMap),
author: buildAuthor(raw.author_id, memberMap),
created_at: raw.created_at,
updated_at: raw.updated_at,
is_edited: isEdited(raw.created_at, raw.updated_at),
can_edit: currentUserId === raw.author_id,
can_delete: currentUserId === raw.author_id || isOwner,
reply_count: replies.length,
replies,
};
});
comments.sort((a, b) => new Date(a.created_at).getTime() - new Date(b.created_at).getTime());
result.set(messageId, comments);
}
return result;
}
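// Shape sketch with hypothetical rows: two top-level comments on message 7
// plus one reply whose parent_id points at the first produces
//   Map { 7 => [ { id: 1, reply_count: 1, replies: [{ id: 3, ... }] },
//                { id: 2, reply_count: 0, replies: [] } ] }
// with both levels sorted oldest-first by created_at.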
/**
* Syncs comments for a thread via Zero real-time sync.
*
* Syncs ALL comments for a thread in ONE subscription, then updates
* React Query cache for each message. This avoids N subscriptions for N messages.
*/
export function useCommentsSync(threadId: number | null) {
const queryClient = useQueryClient();
const { data: membersData } = useAtomValue(membersAtom);
const { data: currentUser } = useAtomValue(currentUserAtom);
const { data: myAccess } = useAtomValue(myAccessAtom);
const memberMap = useMemo(() => buildMemberMap(membersData), [membersData]);
const currentUserId = currentUser?.id;
const isOwner = myAccess?.is_owner ?? false;
const memberMapRef = useRef(memberMap);
const currentUserIdRef = useRef(currentUserId);
const isOwnerRef = useRef(isOwner);
const queryClientRef = useRef(queryClient);
useEffect(() => {
memberMapRef.current = memberMap;
currentUserIdRef.current = currentUserId;
isOwnerRef.current = isOwner;
queryClientRef.current = queryClient;
}, [memberMap, currentUserId, isOwner, queryClient]);
const updateReactQueryCache = useCallback((rows: RawCommentRow[]) => {
const commentsByMessage = transformComments(
rows,
memberMapRef.current,
currentUserIdRef.current,
isOwnerRef.current
);
for (const [messageId, comments] of commentsByMessage) {
const cacheKey = cacheKeys.comments.byMessage(messageId);
queryClientRef.current.setQueryData(cacheKey, {
comments,
total_count: comments.length,
});
}
}, []);
const [data] = useQuery(queries.comments.byThread({ threadId: threadId ?? -1 }));
useEffect(() => {
if (!threadId || !data) return;
const rows: RawCommentRow[] = data.map((c) => ({
id: c.id,
message_id: c.messageId,
thread_id: c.threadId,
parent_id: c.parentId ?? null,
author_id: c.authorId ?? null,
content: c.content,
created_at: new Date(c.createdAt).toISOString(),
updated_at: new Date(c.updatedAt).toISOString(),
}));
updateReactQueryCache(rows);
}, [threadId, data, updateReactQueryCache]);
}
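// Minimal usage sketch (hypothetical component; assumes the Zero and React
// Query providers are already mounted higher in the tree):
//
//   function CommentsPane({ threadId }: { threadId: number }) {
//     useCommentsSync(threadId); // keeps per-message comment caches live
//     // render from the React Query cache keyed by
//     // cacheKeys.comments.byMessage(messageId)
//     return null;
//   }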

View file

@@ -1,220 +0,0 @@
"use client";
import { useCallback, useEffect, useRef, useState } from "react";
import type { SearchSourceConnector } from "@/contracts/types/connector.types";
import type { SyncHandle } from "@/lib/electric/client";
import { useElectricClient } from "@/lib/electric/context";
const IS_DEV = process.env.NODE_ENV === "development";
/**
* Hook for managing connectors with Electric SQL real-time sync
*
* Uses the Electric client from context (provided by ElectricProvider)
* instead of initializing its own - prevents race conditions and memory leaks
*/
export function useConnectorsElectric(searchSpaceId: number | string | null) {
// Get Electric client from context - ElectricProvider handles initialization
const electricClient = useElectricClient();
const [connectors, setConnectors] = useState<SearchSourceConnector[]>([]);
const [loading, setLoading] = useState(true);
const [error, setError] = useState<Error | null>(null);
const syncHandleRef = useRef<SyncHandle | null>(null);
const liveQueryRef = useRef<{ unsubscribe: () => void } | null>(null);
const syncKeyRef = useRef<string | null>(null);
// Transform connector data from Electric SQL/PGlite to match expected format
function transformConnector(connector: any): SearchSourceConnector {
return {
...connector,
last_indexed_at: connector.last_indexed_at
? typeof connector.last_indexed_at === "string"
? connector.last_indexed_at
: new Date(connector.last_indexed_at).toISOString()
: null,
next_scheduled_at: connector.next_scheduled_at
? typeof connector.next_scheduled_at === "string"
? connector.next_scheduled_at
: new Date(connector.next_scheduled_at).toISOString()
: null,
created_at: connector.created_at
? typeof connector.created_at === "string"
? connector.created_at
: new Date(connector.created_at).toISOString()
: new Date().toISOString(),
};
}
// Start syncing when Electric client is available
useEffect(() => {
// If no Electric client available, immediately mark as not loading (disabled)
if (!electricClient) {
setLoading(false);
setError(new Error("Electric SQL not configured"));
return;
}
// Wait for searchSpaceId to be available
if (!searchSpaceId) {
setConnectors([]);
setLoading(false);
return;
}
// Create a unique key for this sync to prevent duplicate subscriptions
const syncKey = `connectors_${searchSpaceId}`;
if (syncKeyRef.current === syncKey) {
// Already syncing for this search space
return;
}
let mounted = true;
syncKeyRef.current = syncKey;
async function startSync() {
try {
if (IS_DEV)
console.log("[useConnectorsElectric] Starting sync for search space:", searchSpaceId);
const handle = await electricClient.syncShape({
table: "search_source_connectors",
where: `search_space_id = ${searchSpaceId}`,
primaryKey: ["id"],
});
if (IS_DEV)
console.log("[useConnectorsElectric] Sync started:", {
isUpToDate: handle.isUpToDate,
});
// Wait for initial sync with timeout
if (!handle.isUpToDate && handle.initialSyncPromise) {
try {
await Promise.race([
handle.initialSyncPromise,
new Promise((resolve) => setTimeout(resolve, 2000)),
]);
} catch (syncErr) {
console.error("[useConnectorsElectric] Initial sync failed:", syncErr);
}
}
if (!mounted) {
handle.unsubscribe();
return;
}
syncHandleRef.current = handle;
setLoading(false);
setError(null);
// Fetch initial connectors
await fetchConnectors();
// Set up live query for real-time updates
await setupLiveQuery();
} catch (err) {
if (!mounted) return;
console.error("[useConnectorsElectric] Failed to start sync:", err);
setError(err instanceof Error ? err : new Error("Failed to sync connectors"));
setLoading(false);
}
}
async function fetchConnectors() {
try {
const result = await electricClient.db.query(
`SELECT * FROM search_source_connectors WHERE search_space_id = $1 ORDER BY created_at DESC`,
[searchSpaceId]
);
if (mounted) {
setConnectors((result.rows || []).map(transformConnector));
}
} catch (err) {
console.error("[useConnectorsElectric] Failed to fetch:", err);
}
}
async function setupLiveQuery() {
try {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const db = electricClient.db as any;
if (db.live?.query && typeof db.live.query === "function") {
const liveQuery = await db.live.query(
`SELECT * FROM search_source_connectors WHERE search_space_id = $1 ORDER BY created_at DESC`,
[searchSpaceId]
);
if (!mounted) {
liveQuery.unsubscribe?.();
return;
}
// Set initial results
if (liveQuery.initialResults?.rows) {
setConnectors(liveQuery.initialResults.rows.map(transformConnector));
} else if (liveQuery.rows) {
setConnectors(liveQuery.rows.map(transformConnector));
}
// Subscribe to changes
if (typeof liveQuery.subscribe === "function") {
liveQuery.subscribe((result: { rows: any[] }) => {
if (mounted && result.rows) {
setConnectors(result.rows.map(transformConnector));
}
});
}
if (typeof liveQuery.unsubscribe === "function") {
liveQueryRef.current = liveQuery;
}
}
} catch (liveErr) {
console.error("[useConnectorsElectric] Failed to set up live query:", liveErr);
}
}
startSync();
return () => {
mounted = false;
syncKeyRef.current = null;
if (syncHandleRef.current) {
try {
syncHandleRef.current.unsubscribe();
} catch {
// PGlite may already be closed during cleanup
}
syncHandleRef.current = null;
}
if (liveQueryRef.current) {
try {
liveQueryRef.current.unsubscribe();
} catch {
// PGlite may already be closed during cleanup
}
liveQueryRef.current = null;
}
};
}, [searchSpaceId, electricClient]);
// Manual refresh function (optional, for fallback)
const refreshConnectors = useCallback(async () => {
if (!electricClient) return;
try {
const result = await electricClient.db.query(
`SELECT * FROM search_source_connectors WHERE search_space_id = $1 ORDER BY created_at DESC`,
[searchSpaceId]
);
setConnectors((result.rows || []).map(transformConnector));
} catch (err) {
console.error("[useConnectorsElectric] Failed to refresh:", err);
}
}, [electricClient, searchSpaceId]);
return { connectors, loading, error, refreshConnectors };
}

View file

@@ -0,0 +1,43 @@
"use client";
import { useQuery } from "@rocicorp/zero/react";
import { useMemo } from "react";
import type { SearchSourceConnector } from "@/contracts/types/connector.types";
import { queries } from "@/zero/queries";
/**
* Syncs connectors for a search space via Zero.
* Returns connectors, loading state, error, and a refresh function.
*/
export function useConnectorsSync(searchSpaceId: number | string | null) {
const spaceId = searchSpaceId ? Number(searchSpaceId) : -1;
const [data, result] = useQuery(queries.connectors.bySpace({ searchSpaceId: spaceId }));
const connectors: SearchSourceConnector[] = useMemo(() => {
if (!searchSpaceId || !data) return [];
return data.map((c) => ({
id: c.id,
name: c.name,
connector_type: c.connectorType as SearchSourceConnector["connector_type"],
is_indexable: c.isIndexable,
is_active: true,
last_indexed_at: c.lastIndexedAt ? new Date(c.lastIndexedAt).toISOString() : null,
config: (c.config as Record<string, unknown>) ?? {},
enable_summary: c.enableSummary,
periodic_indexing_enabled: c.periodicIndexingEnabled,
indexing_frequency_minutes: c.indexingFrequencyMinutes ?? null,
next_scheduled_at: c.nextScheduledAt ? new Date(c.nextScheduledAt).toISOString() : null,
search_space_id: c.searchSpaceId,
user_id: c.userId,
created_at: c.createdAt ? new Date(c.createdAt).toISOString() : new Date().toISOString(),
}));
}, [searchSpaceId, data]);
const loading = !searchSpaceId ? false : result.type !== "complete";
// This hook never surfaces a sync error; error and refreshConnectors are
// kept as no-ops for API compatibility with the previous implementation.
const error: Error | null = null;
const refreshConnectors = async () => {};
return { connectors, loading, error, refreshConnectors };
}
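// Usage sketch (hypothetical component; Spinner and ConnectorRow are
// placeholders, and the Zero provider is assumed to be mounted):
//
//   const { connectors, loading } = useConnectorsSync(searchSpaceId);
//   if (loading) return <Spinner />;
//   return connectors.map((c) => <ConnectorRow key={c.id} connector={c} />);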

View file

@@ -1,7 +1,8 @@
"use client";
import { useQuery } from "@rocicorp/zero/react";
import { useEffect, useRef, useState } from "react";
import { useElectricClient } from "@/lib/electric/context";
import { queries } from "@/zero/queries";
export type DocumentsProcessingStatus = "idle" | "processing" | "success" | "error";
@@ -15,152 +16,64 @@ const SUCCESS_LINGER_MS = 5000;
* - "idle" nothing noteworthy (show normal icon)
*/
export function useDocumentsProcessing(searchSpaceId: number | null): DocumentsProcessingStatus {
const electricClient = useElectricClient();
const [status, setStatus] = useState<DocumentsProcessingStatus>("idle");
const liveQueryRef = useRef<{ unsubscribe?: () => void } | null>(null);
const wasProcessingRef = useRef(false);
const successTimerRef = useRef<ReturnType<typeof setTimeout> | null>(null);
const [documents] = useQuery(queries.documents.bySpace({ searchSpaceId: searchSpaceId ?? -1 }));
useEffect(() => {
if (!searchSpaceId || !electricClient) return;
if (!searchSpaceId || !documents) return;
const spaceId = searchSpaceId;
const client = electricClient;
let mounted = true;
let processingCount = 0;
let failedCount = 0;
async function setup() {
if (liveQueryRef.current) {
try {
liveQueryRef.current.unsubscribe?.();
} catch {
/* PGlite may be closed */
}
liveQueryRef.current = null;
}
try {
const handle = await client.syncShape({
table: "documents",
where: `search_space_id = ${spaceId}`,
columns: [
"id",
"document_type",
"search_space_id",
"title",
"created_by_id",
"created_at",
"status",
],
primaryKey: ["id"],
});
if (!mounted) return;
if (!handle.isUpToDate && handle.initialSyncPromise) {
await Promise.race([
handle.initialSyncPromise,
new Promise((resolve) => setTimeout(resolve, 5000)),
]);
}
if (!mounted) return;
const db = client.db as {
live?: {
query: <T>(
sql: string,
params?: (number | string)[]
) => Promise<{
subscribe: (cb: (result: { rows: T[] }) => void) => void;
unsubscribe?: () => void;
}>;
};
};
if (!db.live?.query) return;
const liveQuery = await db.live.query<{
processing_count: number | string;
failed_count: number | string;
}>(
`SELECT
SUM(CASE WHEN status->>'state' IN ('pending', 'processing') THEN 1 ELSE 0 END) AS processing_count,
SUM(CASE WHEN status->>'state' = 'failed' THEN 1 ELSE 0 END) AS failed_count
FROM documents
WHERE search_space_id = $1`,
[spaceId]
);
if (!mounted) {
liveQuery.unsubscribe?.();
return;
}
liveQuery.subscribe(
(result: {
rows: Array<{ processing_count: number | string; failed_count: number | string }>;
}) => {
if (!mounted || !result.rows?.[0]) return;
const processingCount = Number(result.rows[0].processing_count) || 0;
const failedCount = Number(result.rows[0].failed_count) || 0;
if (processingCount > 0) {
wasProcessingRef.current = true;
if (successTimerRef.current) {
clearTimeout(successTimerRef.current);
successTimerRef.current = null;
}
setStatus("processing");
} else if (failedCount > 0) {
wasProcessingRef.current = false;
if (successTimerRef.current) {
clearTimeout(successTimerRef.current);
successTimerRef.current = null;
}
setStatus("error");
} else if (wasProcessingRef.current) {
wasProcessingRef.current = false;
setStatus("success");
if (successTimerRef.current) {
clearTimeout(successTimerRef.current);
}
successTimerRef.current = setTimeout(() => {
if (mounted) {
setStatus("idle");
successTimerRef.current = null;
}
}, SUCCESS_LINGER_MS);
} else {
setStatus("idle");
}
}
);
liveQueryRef.current = liveQuery;
} catch (err) {
console.error("[useDocumentsProcessing] Electric setup failed:", err);
for (const doc of documents) {
const state = (doc.status as { state?: string } | null)?.state;
if (state === "pending" || state === "processing") {
processingCount++;
} else if (state === "failed") {
failedCount++;
}
}
setup();
return () => {
mounted = false;
if (processingCount > 0) {
wasProcessingRef.current = true;
if (successTimerRef.current) {
clearTimeout(successTimerRef.current);
successTimerRef.current = null;
}
if (liveQueryRef.current) {
try {
liveQueryRef.current.unsubscribe?.();
} catch {
/* PGlite may be closed */
}
liveQueryRef.current = null;
setStatus("processing");
} else if (failedCount > 0) {
wasProcessingRef.current = false;
if (successTimerRef.current) {
clearTimeout(successTimerRef.current);
successTimerRef.current = null;
}
setStatus("error");
} else if (wasProcessingRef.current) {
wasProcessingRef.current = false;
setStatus("success");
if (successTimerRef.current) {
clearTimeout(successTimerRef.current);
}
successTimerRef.current = setTimeout(() => {
setStatus("idle");
successTimerRef.current = null;
}, SUCCESS_LINGER_MS);
} else {
setStatus("idle");
}
}, [searchSpaceId, documents]);
useEffect(() => {
return () => {
if (successTimerRef.current) {
clearTimeout(successTimerRef.current);
successTimerRef.current = null;
}
};
}, [searchSpaceId, electricClient]);
}, []);
return status;
}
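// Status transitions implemented above, summarized:
//   idle -> processing: any document is pending or processing
//   processing -> error: the queue drains but failed documents remain
//   processing -> success -> idle: success lingers for SUCCESS_LINGER_MS, then clears
// Usage sketch (hypothetical): const status = useDocumentsProcessing(spaceId);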

View file

@@ -1,27 +1,16 @@
"use client";
import { useQuery } from "@rocicorp/zero/react";
import { useCallback, useEffect, useRef, useState } from "react";
import type { DocumentSortBy, DocumentTypeEnum, SortOrder } from "@/contracts/types/document.types";
import { documentsApiService } from "@/lib/apis/documents-api.service";
import { filterNewElectricItems, getNewestTimestamp } from "@/lib/electric/baseline";
import type { SyncHandle } from "@/lib/electric/client";
import { useElectricClient } from "@/lib/electric/context";
import { queries } from "@/zero/queries";
export interface DocumentStatusType {
state: "ready" | "pending" | "processing" | "failed";
reason?: string;
}
interface DocumentElectric {
id: number;
search_space_id: number;
document_type: string;
title: string;
created_by_id: string | null;
created_at: string;
status: DocumentStatusType | null;
}
export interface DocumentDisplay {
id: number;
search_space_id: number;
@@ -64,23 +53,14 @@ const EMPTY_TYPE_FILTER: DocumentTypeEnum[] = [];
const INITIAL_PAGE_SIZE = 50;
const SCROLL_PAGE_SIZE = 5;
function isValidDocument(doc: DocumentElectric): boolean {
return doc.id != null && doc.title != null && doc.title !== "";
}
/**
* Paginated documents hook with Electric SQL real-time updates.
* Paginated documents hook with Zero real-time updates.
*
* Architecture:
* 1. API is the PRIMARY data source: fetches pages on demand
* 2. Type counts come from a dedicated lightweight API endpoint
* 3. Electric provides REAL-TIME updates (new docs, deletions, status changes)
* 3. Zero provides REAL-TIME updates (new docs, deletions, status changes)
* 4. Server-side sorting via sort_by + sort_order params
*
* @param searchSpaceId - The search space to load documents for
* @param typeFilter - Document types to filter by (server-side)
* @param sortBy - Column to sort by (server-side)
* @param sortOrder - Sort direction (server-side)
*/
export function useDocuments(
searchSpaceId: number | null,
@@ -88,8 +68,6 @@ export function useDocuments(
sortBy: DocumentSortBy = "created_at",
sortOrder: SortOrder = "desc"
) {
const electricClient = useElectricClient();
const [documents, setDocuments] = useState<DocumentDisplay[]>([]);
const [typeCounts, setTypeCounts] = useState<Record<string, number>>({});
const [total, setTotal] = useState(0);
@@ -103,14 +81,8 @@
const prevParamsRef = useRef<{ sortBy: string; sortOrder: string; typeFilterKey: string } | null>(
null
);
// Snapshot of all doc IDs from Electric's first callback after initial load.
// Anything appearing in subsequent callbacks NOT in this set is genuinely new.
const electricBaselineIdsRef = useRef<Set<number> | null>(null);
const newestApiTimestampRef = useRef<string | null>(null);
const userCacheRef = useRef<Map<string, string>>(new Map());
const emailCacheRef = useRef<Map<string, string>>(new Map());
const syncHandleRef = useRef<SyncHandle | null>(null);
const liveQueryRef = useRef<{ unsubscribe?: () => void } | null>(null);
const typeFilterKey = typeFilter.join(",");
@@ -141,20 +113,6 @@
[]
);
const electricToDisplayDoc = useCallback(
(doc: DocumentElectric): DocumentDisplay => ({
...doc,
created_by_name: doc.created_by_id
? (userCacheRef.current.get(doc.created_by_id) ?? null)
: null,
created_by_email: doc.created_by_id
? (emailCacheRef.current.get(doc.created_by_id) ?? null)
: null,
status: doc.status ?? { state: "ready" },
}),
[]
);
// EFFECT 1: Fetch first page + type counts when params change
// biome-ignore lint/correctness/useExhaustiveDependencies: typeFilterKey serializes typeFilter
useEffect(() => {
@@ -178,8 +136,6 @@
}
apiLoadedCountRef.current = 0;
initialLoadDoneRef.current = false;
electricBaselineIdsRef.current = null;
newestApiTimestampRef.current = null;
const fetchInitialData = async () => {
try {
@@ -209,7 +165,6 @@
setTypeCounts(countsResponse);
setError(null);
apiLoadedCountRef.current = docsResponse.items.length;
newestApiTimestampRef.current = getNewestTimestamp(docsResponse.items);
initialLoadDoneRef.current = true;
} catch (err) {
if (cancelled) return;
@@ -226,207 +181,106 @@
};
}, [searchSpaceId, typeFilterKey, sortBy, sortOrder, populateUserCache, apiToDisplayDoc]);
// EFFECT 2: Electric sync + live query for real-time updates
// EFFECT 2: Zero real-time sync for document updates
const [zeroDocuments] = useQuery(
queries.documents.bySpace({ searchSpaceId: searchSpaceId ?? -1 })
);
useEffect(() => {
if (!searchSpaceId || !electricClient) return;
if (!searchSpaceId || !zeroDocuments || !initialLoadDoneRef.current) return;
const spaceId = searchSpaceId;
const client = electricClient;
let mounted = true;
const validItems = zeroDocuments.filter(
(doc) => doc.id != null && doc.title != null && doc.title !== ""
);
async function setupElectricRealtime() {
if (syncHandleRef.current) {
try {
syncHandleRef.current.unsubscribe();
} catch {
/* PGlite may already be closed */
}
syncHandleRef.current = null;
}
if (liveQueryRef.current) {
try {
liveQueryRef.current.unsubscribe?.();
} catch {
/* PGlite may already be closed */
}
liveQueryRef.current = null;
}
const unknownUserIds = validItems
.filter((doc) => doc.createdById != null && !userCacheRef.current.has(doc.createdById))
.map((doc) => doc.createdById as string);
try {
const handle = await client.syncShape({
table: "documents",
where: `search_space_id = ${spaceId}`,
columns: [
"id",
"document_type",
"search_space_id",
"title",
"created_by_id",
"created_at",
"status",
],
primaryKey: ["id"],
});
if (!mounted) {
handle.unsubscribe();
return;
}
syncHandleRef.current = handle;
if (!handle.isUpToDate && handle.initialSyncPromise) {
await Promise.race([
handle.initialSyncPromise,
new Promise((resolve) => setTimeout(resolve, 5000)),
]);
}
if (!mounted) return;
const db = client.db as {
live?: {
query: <T>(
sql: string,
params?: (number | string)[]
) => Promise<{
subscribe: (cb: (result: { rows: T[] }) => void) => void;
unsubscribe?: () => void;
}>;
};
};
if (!db.live?.query) return;
const query = `SELECT id, document_type, search_space_id, title, created_by_id, created_at, status
FROM documents
WHERE search_space_id = $1
ORDER BY created_at DESC`;
const liveQuery = await db.live.query<DocumentElectric>(query, [spaceId]);
if (!mounted) {
liveQuery.unsubscribe?.();
return;
}
liveQuery.subscribe((result: { rows: DocumentElectric[] }) => {
if (!mounted || !result.rows || !initialLoadDoneRef.current) return;
const validItems = result.rows.filter(isValidDocument);
const isFullySynced = syncHandleRef.current?.isUpToDate ?? false;
const unknownUserIds = validItems
.filter(
(doc): doc is DocumentElectric & { created_by_id: string } =>
doc.created_by_id !== null && !userCacheRef.current.has(doc.created_by_id)
)
.map((doc) => doc.created_by_id);
if (unknownUserIds.length > 0) {
documentsApiService
.getDocuments({
queryParams: {
search_space_id: spaceId,
page: 0,
page_size: 20,
},
})
.then((response) => {
populateUserCache(response.items);
if (mounted) {
setDocuments((prev) =>
prev.map((doc) => ({
...doc,
created_by_name: doc.created_by_id
? (userCacheRef.current.get(doc.created_by_id) ?? null)
: null,
created_by_email: doc.created_by_id
? (emailCacheRef.current.get(doc.created_by_id) ?? null)
: null,
}))
);
}
})
.catch(() => {});
}
setDocuments((prev) => {
const liveIds = new Set(validItems.map((d) => d.id));
const prevIds = new Set(prev.map((d) => d.id));
const newItems = filterNewElectricItems(
validItems,
liveIds,
prevIds,
electricBaselineIdsRef,
newestApiTimestampRef.current
).map(electricToDisplayDoc);
// Update existing docs (status changes, title edits)
let updated = prev.map((doc) => {
if (liveIds.has(doc.id)) {
const liveItem = validItems.find((v) => v.id === doc.id);
if (liveItem) {
return electricToDisplayDoc(liveItem);
}
}
return doc;
});
// Remove deleted docs (only when fully synced)
if (isFullySynced) {
updated = updated.filter((doc) => liveIds.has(doc.id));
}
if (newItems.length > 0) {
return [...newItems, ...updated];
}
return updated;
});
// Update type counts when Electric detects changes
if (isFullySynced && validItems.length > 0) {
const counts: Record<string, number> = {};
for (const item of validItems) {
counts[item.document_type] = (counts[item.document_type] || 0) + 1;
}
setTypeCounts(counts);
setTotal(validItems.length);
}
});
liveQueryRef.current = liveQuery;
} catch (err) {
console.error("[useDocuments] Electric setup failed:", err);
}
if (unknownUserIds.length > 0) {
documentsApiService
.getDocuments({
queryParams: {
search_space_id: searchSpaceId,
page: 0,
page_size: 20,
},
})
.then((response) => {
populateUserCache(response.items);
setDocuments((prev) =>
prev.map((doc) => ({
...doc,
created_by_name: doc.created_by_id
? (userCacheRef.current.get(doc.created_by_id) ?? null)
: null,
created_by_email: doc.created_by_id
? (emailCacheRef.current.get(doc.created_by_id) ?? null)
: null,
}))
);
})
.catch(() => {});
}
setupElectricRealtime();
const liveIds = new Set(validItems.map((d) => d.id));
return () => {
mounted = false;
if (syncHandleRef.current) {
try {
syncHandleRef.current.unsubscribe();
} catch {
/* PGlite may already be closed */
}
syncHandleRef.current = null;
}
if (liveQueryRef.current) {
try {
liveQueryRef.current.unsubscribe?.();
} catch {
/* PGlite may already be closed */
}
liveQueryRef.current = null;
}
};
}, [searchSpaceId, electricClient, electricToDisplayDoc, populateUserCache]);
setDocuments((prev) => {
const prevIds = new Set(prev.map((d) => d.id));
// Rows from Zero that are not yet in local state are genuinely new
const newItems: DocumentDisplay[] = validItems
.filter((d) => !prevIds.has(d.id))
.map((doc) => ({
id: doc.id,
search_space_id: doc.searchSpaceId,
document_type: doc.documentType,
title: doc.title,
created_by_id: doc.createdById ?? null,
created_by_name: doc.createdById
? (userCacheRef.current.get(doc.createdById) ?? null)
: null,
created_by_email: doc.createdById
? (emailCacheRef.current.get(doc.createdById) ?? null)
: null,
created_at: new Date(doc.createdAt).toISOString(),
status: (doc.status as unknown as DocumentStatusType) ?? { state: "ready" },
}));
let updated = prev.map((existing) => {
if (liveIds.has(existing.id)) {
const liveItem = validItems.find((v) => v.id === existing.id);
if (liveItem) {
return {
...existing,
title: liveItem.title,
document_type: liveItem.documentType,
status: (liveItem.status as unknown as DocumentStatusType) ?? {
state: "ready" as const,
},
};
}
}
return existing;
});
updated = updated.filter((doc) => liveIds.has(doc.id));
if (newItems.length > 0) {
return [...newItems, ...updated];
}
return updated;
});
const counts: Record<string, number> = {};
for (const item of validItems) {
counts[item.documentType] = (counts[item.documentType] || 0) + 1;
}
setTypeCounts(counts);
setTotal(validItems.length);
}, [searchSpaceId, zeroDocuments, populateUserCache]);
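// Reconciliation summary for the effect above: Zero rows not yet in local
// state are prepended as new, rows present in both get title/type/status
// refreshed in place, and local rows missing from the Zero snapshot are
// dropped as deletions. Worked example: prev IDs [5, 4, 3] and Zero IDs
// [6, 5, 3] yield [6, 5, 3] (4 was deleted, 6 is new).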
// EFFECT 3: Reset on search space change
const prevSearchSpaceIdRef = useRef<number | null>(null);
useEffect(() => {
@@ -437,8 +291,6 @@ export function useDocuments(
setHasMore(false);
apiLoadedCountRef.current = 0;
initialLoadDoneRef.current = false;
electricBaselineIdsRef.current = null;
newestApiTimestampRef.current = null;
userCacheRef.current.clear();
emailCacheRef.current.clear();
}

View file

@@ -1,10 +1,10 @@
"use client";
import { useQuery } from "@rocicorp/zero/react";
import { useCallback, useEffect, useRef, useState } from "react";
import type { InboxItem, NotificationCategory } from "@/contracts/types/inbox.types";
import { notificationsApiService } from "@/lib/apis/notifications-api.service";
import { filterNewElectricItems, getNewestTimestamp } from "@/lib/electric/baseline";
import { useElectricClient } from "@/lib/electric/context";
import { queries } from "@/zero/queries";
export type {
InboxItem,
@@ -16,17 +16,16 @@ const INITIAL_PAGE_SIZE = 50;
const SCROLL_PAGE_SIZE = 30;
const SYNC_WINDOW_DAYS = 4;
const CATEGORY_TYPE_SQL: Record<NotificationCategory, string> = {
comments: "AND type IN ('new_mention', 'comment_reply')",
status:
"AND type IN ('connector_indexing', 'connector_deletion', 'document_processing', 'page_limit_exceeded')",
const CATEGORY_TYPES: Record<NotificationCategory, string[]> = {
comments: ["new_mention", "comment_reply"],
status: [
"connector_indexing",
"connector_deletion",
"document_processing",
"page_limit_exceeded",
],
};
/**
* Calculate the cutoff date for sync window.
* Rounds to the start of the day (midnight UTC) to ensure stable values
* across re-renders.
*/
function getSyncCutoffDate(): string {
const cutoff = new Date();
cutoff.setDate(cutoff.getDate() - SYNC_WINDOW_DAYS);
@@ -35,24 +34,12 @@
}
/**
* Hook for managing inbox items with API-first architecture + Electric real-time deltas.
* Hook for managing inbox items with API-first architecture + Zero real-time deltas.
*
* Architecture (Documents pattern, per-tab):
* Architecture:
* 1. API is the PRIMARY data source: fetches first page on mount with category filter
* 2. Electric provides REAL-TIME updates (new items, status changes, read state)
* 3. Baseline pattern prevents duplicates between API and Electric
* 4. Electric sync shape is SHARED across instances (client-level caching)
* each instance creates its own type-filtered live queries
*
* Unread count strategy:
* - API provides the category-filtered total on mount (ground truth across all time)
* - Electric live query counts unread within SYNC_WINDOW_DAYS (filtered by type)
* - olderUnreadOffsetRef bridges the gap: total = offset + recent
* - Optimistic updates adjust both the count and the offset (for old items)
*
* @param userId - The user ID to fetch inbox items for
* @param searchSpaceId - The search space ID to filter inbox items
* @param category - Which tab: "comments" or "status"
* 2. Zero provides REAL-TIME updates (new items, status changes, read state)
* 3. Unread count = olderUnreadOffset + recent unread from Zero
*/
export function useInbox(
userId: string | null,
@@ -61,8 +48,6 @@
prefetchedUnread?: { total_unread: number; recent_unread: number } | null,
prefetchedUnreadReady = true
) {
const electricClient = useElectricClient();
const [inboxItems, setInboxItems] = useState<InboxItem[]>([]);
const [loading, setLoading] = useState(true);
const [loadingMore, setLoadingMore] = useState(false);
@@ -71,17 +56,12 @@
const [unreadCount, setUnreadCount] = useState(0);
const initialLoadDoneRef = useRef(false);
const electricBaselineIdsRef = useRef<Set<number> | null>(null);
const newestApiTimestampRef = useRef<string | null>(null);
const liveQueryRef = useRef<{ unsubscribe?: () => void } | null>(null);
const unreadLiveQueryRef = useRef<{ unsubscribe?: () => void } | null>(null);
const olderUnreadOffsetRef = useRef<number | null>(null);
const apiUnreadTotalRef = useRef(0);
// EFFECT 1: Fetch first page + unread count from API with category filter.
// When prefetchedUnreadReady=false, we wait for the batch query to settle
// before deciding whether we need an individual unread-count fallback call.
const categoryTypes = CATEGORY_TYPES[category];
// EFFECT 1: Fetch first page + unread count from API with category filter
useEffect(() => {
if (!userId || !searchSpaceId) return;
if (!prefetchedUnreadReady) return;
@@ -92,8 +72,6 @@
setInboxItems([]);
setHasMore(false);
initialLoadDoneRef.current = false;
electricBaselineIdsRef.current = null;
newestApiTimestampRef.current = null;
olderUnreadOffsetRef.current = null;
apiUnreadTotalRef.current = 0;
@@ -107,7 +85,6 @@
},
});
// Use prefetched counts when available, otherwise fetch individually.
const unreadPromise = prefetchedUnread
? Promise.resolve(prefetchedUnread)
: notificationsApiService.getUnreadCount(searchSpaceId, undefined, category);
@@ -123,7 +100,6 @@
setHasMore(notificationsResponse.has_more);
setUnreadCount(unreadResponse.total_unread);
apiUnreadTotalRef.current = unreadResponse.total_unread;
newestApiTimestampRef.current = getNewestTimestamp(notificationsResponse.items);
setError(null);
initialLoadDoneRef.current = true;
} catch (err) {
@@ -141,208 +117,83 @@
};
}, [userId, searchSpaceId, category, prefetchedUnread, prefetchedUnreadReady]);
// EFFECT 2: Electric sync (shared shape) + per-instance type-filtered live queries
// EFFECT 2: Zero real-time sync for notification updates
const [zeroNotifications] = useQuery(queries.notifications.byUser({ userId: userId ?? "" }));
useEffect(() => {
if (!userId || !searchSpaceId || !electricClient) return;
if (!userId || !searchSpaceId || !zeroNotifications || !initialLoadDoneRef.current) return;
const uid = userId;
const spaceId = searchSpaceId;
const client = electricClient;
const typeFilter = CATEGORY_TYPE_SQL[category];
let mounted = true;
const cutoff = new Date(getSyncCutoffDate());
async function setupElectricRealtime() {
// Clean up previous live queries (NOT the sync shape — it's shared)
if (liveQueryRef.current) {
try {
liveQueryRef.current.unsubscribe?.();
} catch {
/* PGlite may be closed */
const validItems = zeroNotifications.filter((item) => {
if (item.id == null) return false;
if (!categoryTypes.includes(item.type)) return false;
if (item.searchSpaceId !== null && item.searchSpaceId !== searchSpaceId) return false;
return true;
});
const recentItems = validItems.filter((item) => new Date(item.createdAt) > cutoff);
const liveIds = new Set(recentItems.map((d) => d.id));
setInboxItems((prev) => {
const prevIds = new Set(prev.map((d) => d.id));
const newItems: InboxItem[] = recentItems
.filter((d) => !prevIds.has(d.id))
.map(
(item) =>
({
id: item.id,
user_id: item.userId,
search_space_id: item.searchSpaceId ?? undefined,
type: item.type,
title: item.title,
message: item.message,
read: item.read,
metadata: item.metadata as unknown as Record<string, unknown>,
created_at: new Date(item.createdAt).toISOString(),
updated_at: item.updatedAt ? new Date(item.updatedAt).toISOString() : null,
}) as InboxItem
);
let updated = prev.map((existing) => {
const liveItem = recentItems.find((v) => v.id === existing.id);
if (liveItem) {
return {
...existing,
read: liveItem.read,
title: liveItem.title,
message: liveItem.message,
metadata: liveItem.metadata as unknown as Record<string, unknown>,
} as InboxItem;
}
liveQueryRef.current = null;
}
if (unreadLiveQueryRef.current) {
try {
unreadLiveQueryRef.current.unsubscribe?.();
} catch {
/* PGlite may be closed */
}
unreadLiveQueryRef.current = null;
return existing;
});
updated = updated.filter((item) => {
if (new Date(item.created_at) < cutoff) return true;
return liveIds.has(item.id);
});
if (newItems.length > 0) {
return [...newItems, ...updated];
}
try {
const cutoffDate = getSyncCutoffDate();
return updated;
});
// Sync shape is cached by the Electric client — multiple hook instances
// calling syncShape with the same params get the same handle.
const handle = await client.syncShape({
table: "notifications",
where: `user_id = '${uid}' AND created_at > '${cutoffDate}'`,
primaryKey: ["id"],
});
if (!mounted) return;
if (!handle.isUpToDate && handle.initialSyncPromise) {
await Promise.race([
handle.initialSyncPromise,
new Promise((resolve) => setTimeout(resolve, 5000)),
]);
}
if (!mounted) return;
const db = client.db as {
live?: {
query: <T>(
sql: string,
params?: (number | string)[]
) => Promise<{
subscribe: (cb: (result: { rows: T[] }) => void) => void;
unsubscribe?: () => void;
}>;
};
};
if (!db.live?.query) return;
// Per-instance live query filtered by category types
const itemsQuery = `SELECT * FROM notifications
WHERE user_id = $1
AND (search_space_id = $2 OR search_space_id IS NULL)
AND created_at > '${cutoffDate}'
${typeFilter}
ORDER BY created_at DESC`;
const liveQuery = await db.live.query<InboxItem>(itemsQuery, [uid, spaceId]);
if (!mounted) {
liveQuery.unsubscribe?.();
return;
}
liveQuery.subscribe((result: { rows: InboxItem[] }) => {
if (!mounted || !result.rows || !initialLoadDoneRef.current) return;
const validItems = result.rows.filter((item) => item.id != null && item.title != null);
const cutoff = new Date(getSyncCutoffDate());
const liveItemMap = new Map(validItems.map((d) => [d.id, d]));
const liveIds = new Set(liveItemMap.keys());
setInboxItems((prev) => {
const prevIds = new Set(prev.map((d) => d.id));
const newItems = filterNewElectricItems(
validItems,
liveIds,
prevIds,
electricBaselineIdsRef,
newestApiTimestampRef.current
);
let updated = prev.map((item) => {
const liveItem = liveItemMap.get(item.id);
if (liveItem) return liveItem;
return item;
});
const isFullySynced = handle.isUpToDate;
if (isFullySynced) {
updated = updated.filter((item) => {
if (new Date(item.created_at) < cutoff) return true;
return liveIds.has(item.id);
});
}
if (newItems.length > 0) {
return [...newItems, ...updated];
}
return updated;
});
// Calibrate the older-unread offset using baseline items
// (items present in both Electric and the API-loaded list).
// This avoids the timing bug where new items arriving between
// the API fetch and Electric's first callback would be absorbed
// into the offset, making the count appear unchanged.
const baseline = electricBaselineIdsRef.current;
if (olderUnreadOffsetRef.current === null && baseline !== null) {
const baselineUnreadCount = validItems.filter(
(item) => baseline.has(item.id) && !item.read
).length;
olderUnreadOffsetRef.current = Math.max(
0,
apiUnreadTotalRef.current - baselineUnreadCount
);
}
// Derive unread count from all Electric items + the older offset
if (olderUnreadOffsetRef.current !== null) {
const electricUnreadCount = validItems.filter((item) => !item.read).length;
setUnreadCount(olderUnreadOffsetRef.current + electricUnreadCount);
}
});
liveQueryRef.current = liveQuery;
// Per-instance unread count live query filtered by category types.
// Acts as a secondary reactive path for read-status changes that
// may not trigger the items live query in all edge cases.
const countQuery = `SELECT COUNT(*) as count FROM notifications
WHERE user_id = $1
AND (search_space_id = $2 OR search_space_id IS NULL)
AND created_at > '${cutoffDate}'
AND read = false
${typeFilter}`;
const countLiveQuery = await db.live.query<{ count: number | string }>(countQuery, [
uid,
spaceId,
]);
if (!mounted) {
countLiveQuery.unsubscribe?.();
return;
}
countLiveQuery.subscribe((result: { rows: Array<{ count: number | string }> }) => {
if (!mounted || !result.rows?.[0] || !initialLoadDoneRef.current) return;
if (olderUnreadOffsetRef.current === null) return;
const liveRecentUnread = Number(result.rows[0].count) || 0;
setUnreadCount(olderUnreadOffsetRef.current + liveRecentUnread);
});
unreadLiveQueryRef.current = countLiveQuery;
} catch (err) {
console.error(`[useInbox:${category}] Electric setup failed:`, err);
}
// Calibrate older-unread offset on first Zero data
if (olderUnreadOffsetRef.current === null) {
const recentUnreadCount = recentItems.filter((item) => !item.read).length;
olderUnreadOffsetRef.current = Math.max(0, apiUnreadTotalRef.current - recentUnreadCount);
}
setupElectricRealtime();
return () => {
mounted = false;
// Only clean up live queries — sync shape is shared across instances
if (liveQueryRef.current) {
try {
liveQueryRef.current.unsubscribe?.();
} catch {
/* PGlite may be closed */
}
liveQueryRef.current = null;
}
if (unreadLiveQueryRef.current) {
try {
unreadLiveQueryRef.current.unsubscribe?.();
} catch {
/* PGlite may be closed */
}
unreadLiveQueryRef.current = null;
}
};
}, [userId, searchSpaceId, electricClient, category]);
if (olderUnreadOffsetRef.current !== null) {
const recentUnreadCount = recentItems.filter((item) => !item.read).length;
setUnreadCount(olderUnreadOffsetRef.current + recentUnreadCount);
}
}, [userId, searchSpaceId, zeroNotifications, categoryTypes]);
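// Unread-count arithmetic, worked example: the API reports 12 unread in
// total and Zero's recent window holds 5 unread, so olderUnreadOffset is
// 12 - 5 = 7. Marking one recent item read drops the recent count to 4 and
// the badge shows 7 + 4 = 11; unread items older than the window stay
// counted via the offset.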
// Load more pages via API (cursor-based using before_date)
const loadMore = useCallback(async () => {

View file

@@ -1,162 +0,0 @@
"use client";
import { useCallback, useEffect, useRef } from "react";
import type { RawMessage } from "@/contracts/types/chat-messages.types";
import type { SyncHandle } from "@/lib/electric/client";
import { useElectricClient } from "@/lib/electric/context";
/**
* Syncs chat messages for a thread via Electric SQL.
* Calls onMessagesUpdate when messages change.
*/
export function useMessagesElectric(
threadId: number | null,
onMessagesUpdate: (messages: RawMessage[]) => void
) {
const electricClient = useElectricClient();
const syncHandleRef = useRef<SyncHandle | null>(null);
const liveQueryRef = useRef<{ unsubscribe: () => void } | null>(null);
const syncKeyRef = useRef<string | null>(null);
const onMessagesUpdateRef = useRef(onMessagesUpdate);
useEffect(() => {
onMessagesUpdateRef.current = onMessagesUpdate;
}, [onMessagesUpdate]);
const handleMessagesUpdate = useCallback((rows: RawMessage[]) => {
onMessagesUpdateRef.current(rows);
}, []);
useEffect(() => {
if (!threadId || !electricClient) {
return;
}
const syncKey = `messages_${threadId}`;
if (syncKeyRef.current === syncKey) {
return;
}
const client = electricClient;
let mounted = true;
syncKeyRef.current = syncKey;
async function startSync() {
try {
const handle = await client.syncShape({
table: "new_chat_messages",
where: `thread_id = ${threadId}`,
columns: ["id", "thread_id", "role", "content", "author_id", "created_at"],
primaryKey: ["id"],
});
if (!handle.isUpToDate && handle.initialSyncPromise) {
try {
await Promise.race([
handle.initialSyncPromise,
new Promise((resolve) => setTimeout(resolve, 3000)),
]);
} catch (err) {
console.warn("[useMessagesElectric] Sync timeout:", err);
}
}
if (!mounted) {
handle.unsubscribe();
return;
}
syncHandleRef.current = handle;
await fetchMessages();
await setupLiveQuery();
} catch (err) {
console.warn("[useMessagesElectric] Sync failed:", err);
}
}
async function fetchMessages() {
try {
const result = await client.db.query<RawMessage>(
`SELECT id, thread_id, role, content, author_id, created_at
FROM new_chat_messages
WHERE thread_id = $1
ORDER BY created_at ASC`,
[threadId]
);
if (mounted && result.rows) {
handleMessagesUpdate(result.rows);
}
} catch (err) {
console.warn("[useMessagesElectric] Query failed:", err);
}
}
async function setupLiveQuery() {
try {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const db = client.db as any;
if (db.live?.query && typeof db.live.query === "function") {
const liveQuery = await db.live.query(
`SELECT id, thread_id, role, content, author_id, created_at
FROM new_chat_messages
WHERE thread_id = $1
ORDER BY created_at ASC`,
[threadId]
);
if (!mounted) {
liveQuery.unsubscribe?.();
return;
}
if (liveQuery.initialResults?.rows) {
handleMessagesUpdate(liveQuery.initialResults.rows);
} else if (liveQuery.rows) {
handleMessagesUpdate(liveQuery.rows);
}
if (typeof liveQuery.subscribe === "function") {
liveQuery.subscribe((result: { rows: RawMessage[] }) => {
if (mounted && result.rows) {
handleMessagesUpdate(result.rows);
}
});
}
if (typeof liveQuery.unsubscribe === "function") {
liveQueryRef.current = liveQuery;
}
}
} catch (err) {
console.warn("[useMessagesElectric] Live query failed:", err);
}
}
startSync();
return () => {
mounted = false;
syncKeyRef.current = null;
if (syncHandleRef.current) {
try {
syncHandleRef.current.unsubscribe();
} catch {
// PGlite may already be closed during cleanup
}
syncHandleRef.current = null;
}
if (liveQueryRef.current) {
try {
liveQueryRef.current.unsubscribe();
} catch {
// PGlite may already be closed during cleanup
}
liveQueryRef.current = null;
}
};
}, [threadId, electricClient, handleMessagesUpdate]);
}

View file

@@ -0,0 +1,38 @@
"use client";
import { useQuery } from "@rocicorp/zero/react";
import { useEffect, useRef } from "react";
import type { RawMessage } from "@/contracts/types/chat-messages.types";
import { queries } from "@/zero/queries";
/**
* Syncs chat messages for a thread via Zero.
* Calls onMessagesUpdate when messages change.
*/
export function useMessagesSync(
threadId: number | null,
onMessagesUpdate: (messages: RawMessage[]) => void
) {
const onMessagesUpdateRef = useRef(onMessagesUpdate);
useEffect(() => {
onMessagesUpdateRef.current = onMessagesUpdate;
}, [onMessagesUpdate]);
const [messages] = useQuery(queries.messages.byThread({ threadId: threadId ?? -1 }));
useEffect(() => {
if (!threadId || !messages) return;
const mapped: RawMessage[] = messages.map((msg) => ({
id: msg.id,
thread_id: msg.threadId,
role: msg.role,
content: msg.content,
author_id: msg.authorId ?? null,
created_at: new Date(msg.createdAt).toISOString(),
}));
onMessagesUpdateRef.current(mapped);
}, [threadId, messages]);
}
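// Usage sketch (hypothetical callback and setter; Zero provider assumed):
//
//   useMessagesSync(threadId, (messages) => {
//     // rows arrive as RawMessage objects with ISO-normalized created_at
//     setRawMessages(messages);
//   });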

View file

@@ -1,20 +0,0 @@
/**
* Get auth token for Electric SQL
* In production, this should get the token from your auth system
*/
export async function getElectricAuthToken(): Promise<string> {
// For insecure mode (development), return empty string
if (process.env.NEXT_PUBLIC_ELECTRIC_AUTH_MODE === "insecure") {
return "";
}
// In production, get token from your auth system
// This should match your backend auth token
if (typeof window !== "undefined") {
const token = localStorage.getItem("surfsense_bearer_token");
return token || "";
}
return "";
}

View file

@@ -1,62 +0,0 @@
import type { MutableRefObject } from "react";
/**
* Extract the newest `created_at` timestamp from a list of items.
* Used to establish the server-clock cutoff for the baseline timing-gap check.
*
* Uses Date parsing instead of string comparison because the API (Python
* isoformat: "+00:00" suffix) and Electric/PGlite ("Z" suffix, variable
* fractional-second precision) produce different string formats.
*/
export function getNewestTimestamp<T extends { created_at: string }>(items: T[]): string | null {
if (items.length === 0) return null;
let newest = items[0].created_at;
let newestMs = new Date(newest).getTime();
for (let i = 1; i < items.length; i++) {
const ms = new Date(items[i].created_at).getTime();
if (ms > newestMs) {
newest = items[i].created_at;
newestMs = ms;
}
}
return newest;
}
/**
* Identify genuinely new items from an Electric live query callback.
*
* On Electric's first callback, ALL live IDs are snapshotted as the baseline.
* Items beyond the API's first page are in this baseline and stay hidden
* (they'll appear via scroll pagination). Items created in the timing gap
* between the API fetch and Electric's first callback are rescued via the
* `newestApiTimestamp` check: their `created_at` is newer than anything
* the API returned, so they pass through.
*
*/
export function filterNewElectricItems<T extends { id: number; created_at: string }>(
validItems: T[],
liveIds: Set<number>,
prevIds: Set<number>,
baselineRef: MutableRefObject<Set<number> | null>,
newestApiTimestamp: string | null
): T[] {
if (baselineRef.current === null) {
baselineRef.current = new Set(liveIds);
}
const baseline = baselineRef.current;
const cutoffMs = newestApiTimestamp ? new Date(newestApiTimestamp).getTime() : null;
const newItems = validItems.filter((item) => {
if (prevIds.has(item.id)) return false;
if (!baseline.has(item.id)) return true;
if (cutoffMs !== null && new Date(item.created_at).getTime() > cutoffMs) return true;
return false;
});
for (const item of newItems) {
baseline.add(item.id);
}
return newItems;
}
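// Worked example for this (now removed) baseline check: the first callback's
// live IDs {1, 2, 3} are snapshotted as the baseline and nothing surfaces; a
// later callback with {1, 2, 3, 4} surfaces only item 4. An item that sits in
// the baseline but whose created_at is newer than the newest API timestamp
// was created in the API/Electric timing gap, so the cutoff comparison still
// lets it through as new.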

View file

@@ -1,848 +0,0 @@
/**
* Electric SQL client setup for ElectricSQL 1.x with PGlite
*
* USER-SPECIFIC DATABASE ARCHITECTURE:
* - Each user gets their own IndexedDB database: idb://surfsense-{userId}-v{version}
* - On login: cleanup databases from other users, then initialize current user's DB
* - On logout: best-effort cleanup (not relied upon)
*
* This ensures:
* 1. Complete user isolation (data can never leak between users)
* 2. Self-healing on login (stale databases are cleaned up)
* 3. Works even if logout cleanup fails
*/
import { PGlite, type Transaction } from "@electric-sql/pglite";
import { live } from "@electric-sql/pglite/live";
import { electricSync } from "@electric-sql/pglite-sync";
// Debug logging - only logs in development, silent in production
const IS_DEV = process.env.NODE_ENV === "development";
function debugLog(...args: unknown[]) {
if (IS_DEV) console.log(...args);
}
function debugWarn(...args: unknown[]) {
if (IS_DEV) console.warn(...args);
}
// Types
export interface ElectricClient {
db: PGlite;
userId: string;
syncShape: (options: SyncShapeOptions) => Promise<SyncHandle>;
}
export interface SyncShapeOptions {
table: string;
where?: string;
columns?: string[];
primaryKey?: string[];
}
export interface SyncHandle {
unsubscribe: () => void;
readonly isUpToDate: boolean;
// The stream property contains the ShapeStreamInterface from pglite-sync
stream?: unknown;
// Promise that resolves when initial sync is complete
initialSyncPromise?: Promise<void>;
}
// Singleton state - now tracks the user ID
let electricClient: ElectricClient | null = null;
let currentUserId: string | null = null;
let isInitializing = false;
let initPromise: Promise<ElectricClient> | null = null;
// Cache for sync handles to prevent duplicate subscriptions (memory optimization)
const activeSyncHandles = new Map<string, SyncHandle>();
// Track pending sync operations to prevent race conditions
// If a sync is in progress, subsequent calls will wait for it instead of starting a new one
const pendingSyncs = new Map<string, Promise<SyncHandle>>();
// Version for sync state - increment this to force fresh sync when Electric config changes
// v2: user-specific database architecture
// v3: consistent cutoff date for sync+queries, visibility refresh support
// v4: heartbeat-based stale notification detection with updated_at tracking
// v5: fixed duplicate key errors, stable cutoff dates, onMustRefetch handler,
// real-time documents table with title/created_by_id/status columns,
// consolidated single documents sync, pending state for document queue visibility
// v6: added enable_summary column to search_source_connectors
// v7: fixed connector-popup using invalid category for useInbox
const SYNC_VERSION = 7;
// Database name prefix for identifying SurfSense databases
const DB_PREFIX = "surfsense-";
// Get Electric URL from environment
function getElectricUrl(): string {
if (typeof window !== "undefined") {
return process.env.NEXT_PUBLIC_ELECTRIC_URL || "http://localhost:5133";
}
return "http://localhost:5133";
}
/**
* Get the database name for a specific user
*/
function getDbName(userId: string): string {
return `idb://${DB_PREFIX}${userId}-v${SYNC_VERSION}`;
}
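// Example: getDbName("42f1a0") yields "idb://surfsense-42f1a0-v7" under
// SYNC_VERSION 7 (hypothetical user ID): one IndexedDB database per
// (user, SYNC_VERSION) pair.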
/**
* Clean up databases from OTHER users AND old versions
* This is called on login to ensure clean state
*/
async function cleanupOtherUserDatabases(currentUserId: string): Promise<void> {
if (typeof window === "undefined" || !window.indexedDB) {
return;
}
// The exact database identifier we want to keep (current user + current version)
// Format: "surfsense-{userId}-v{version}"
const currentDbIdentifier = `${DB_PREFIX}${currentUserId}-v${SYNC_VERSION}`;
try {
// Try to list all databases (not supported in all browsers)
if (typeof window.indexedDB.databases === "function") {
const databases = await window.indexedDB.databases();
for (const dbInfo of databases) {
const dbName = dbInfo.name;
if (!dbName) continue;
// Check if this is a SurfSense database
if (dbName.includes("surfsense")) {
// Check if this is the current database
// PGlite stores with "/pglite/" prefix, so we check if the name ENDS WITH our identifier
if (dbName.endsWith(currentDbIdentifier)) {
debugLog(`[Electric] Keeping current database: ${dbName}`);
continue;
}
// Delete ALL other databases (other users OR old versions of current user)
try {
debugLog(`[Electric] Deleting stale database: ${dbName}`);
window.indexedDB.deleteDatabase(dbName);
} catch (deleteErr) {
debugWarn(`[Electric] Failed to delete database ${dbName}:`, deleteErr);
}
}
}
}
} catch (err) {
// indexedDB.databases() not supported - that's okay, login cleanup is best-effort
debugWarn("[Electric] Could not enumerate databases for cleanup:", err);
}
}
/**
* Initialize the Electric SQL client for a specific user
*
* KEY BEHAVIORS:
* 1. If already initialized for the SAME user, returns existing client
* 2. If initialized for a DIFFERENT user, closes old client and creates new one
* 3. On first init, cleans up databases from other users
*
* @param userId - The current user's ID (required)
*/
export async function initElectric(userId: string): Promise<ElectricClient> {
if (!userId) {
throw new Error("userId is required for Electric initialization");
}
// If already initialized for this user, return existing client
if (electricClient && currentUserId === userId) {
return electricClient;
}
// If initialized for a different user, close the old client first
if (electricClient && currentUserId !== userId) {
debugLog(`[Electric] User changed from ${currentUserId} to ${userId}, reinitializing...`);
await cleanupElectric();
}
// If already initializing, wait for it
if (isInitializing && initPromise) {
return initPromise;
}
isInitializing = true;
currentUserId = userId;
initPromise = (async () => {
try {
// STEP 1: Clean up databases from other users (login-time cleanup)
debugLog("[Electric] Cleaning up databases from other users...");
await cleanupOtherUserDatabases(userId);
// STEP 2: Create user-specific PGlite database
const dbName = getDbName(userId);
debugLog(`[Electric] Initializing database: ${dbName}`);
const db = await PGlite.create({
dataDir: dbName,
relaxedDurability: true,
extensions: {
// Enable debug mode in electricSync only in development
electric: electricSync({ debug: process.env.NODE_ENV === "development" }),
live, // Enable live queries for real-time updates
},
});
// STEP 3: Create the notifications table schema in PGlite
// This matches the backend schema
await db.exec(`
CREATE TABLE IF NOT EXISTS notifications (
id INTEGER PRIMARY KEY,
user_id TEXT NOT NULL,
search_space_id INTEGER,
type TEXT NOT NULL,
title TEXT NOT NULL,
message TEXT NOT NULL,
read BOOLEAN NOT NULL DEFAULT FALSE,
metadata JSONB DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ
);
CREATE INDEX IF NOT EXISTS idx_notifications_user_id ON notifications(user_id);
CREATE INDEX IF NOT EXISTS idx_notifications_read ON notifications(read);
`);
// Create the search_source_connectors table schema in PGlite
// This matches the backend schema
await db.exec(`
CREATE TABLE IF NOT EXISTS search_source_connectors (
id INTEGER PRIMARY KEY,
search_space_id INTEGER NOT NULL,
user_id TEXT NOT NULL,
connector_type TEXT NOT NULL,
name TEXT NOT NULL,
is_indexable BOOLEAN NOT NULL DEFAULT FALSE,
last_indexed_at TIMESTAMPTZ,
config JSONB DEFAULT '{}',
periodic_indexing_enabled BOOLEAN NOT NULL DEFAULT FALSE,
indexing_frequency_minutes INTEGER,
next_scheduled_at TIMESTAMPTZ,
enable_summary BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_connectors_search_space_id ON search_source_connectors(search_space_id);
CREATE INDEX IF NOT EXISTS idx_connectors_type ON search_source_connectors(connector_type);
CREATE INDEX IF NOT EXISTS idx_connectors_user_id ON search_source_connectors(user_id);
`);
// Create the documents table schema in PGlite
// Sync columns needed for real-time table display (lightweight - no content/metadata)
await db.exec(`
CREATE TABLE IF NOT EXISTS documents (
id INTEGER PRIMARY KEY,
search_space_id INTEGER NOT NULL,
document_type TEXT NOT NULL,
title TEXT NOT NULL DEFAULT '',
created_by_id TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
status JSONB DEFAULT '{"state": "ready"}'::jsonb
);
CREATE INDEX IF NOT EXISTS idx_documents_search_space_id ON documents(search_space_id);
CREATE INDEX IF NOT EXISTS idx_documents_type ON documents(document_type);
CREATE INDEX IF NOT EXISTS idx_documents_search_space_type ON documents(search_space_id, document_type);
CREATE INDEX IF NOT EXISTS idx_documents_status ON documents((status->>'state'));
`);
await db.exec(`
CREATE TABLE IF NOT EXISTS chat_comment_mentions (
id INTEGER PRIMARY KEY,
comment_id INTEGER NOT NULL,
mentioned_user_id TEXT NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_chat_comment_mentions_user_id ON chat_comment_mentions(mentioned_user_id);
CREATE INDEX IF NOT EXISTS idx_chat_comment_mentions_comment_id ON chat_comment_mentions(comment_id);
`);
// Create chat_comments table for live comment sync
await db.exec(`
CREATE TABLE IF NOT EXISTS chat_comments (
id INTEGER PRIMARY KEY,
message_id INTEGER NOT NULL,
thread_id INTEGER NOT NULL,
parent_id INTEGER,
author_id TEXT,
content TEXT NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_chat_comments_thread_id ON chat_comments(thread_id);
CREATE INDEX IF NOT EXISTS idx_chat_comments_message_id ON chat_comments(message_id);
CREATE INDEX IF NOT EXISTS idx_chat_comments_parent_id ON chat_comments(parent_id);
`);
// Create new_chat_messages table for live message sync
await db.exec(`
CREATE TABLE IF NOT EXISTS new_chat_messages (
id INTEGER PRIMARY KEY,
thread_id INTEGER NOT NULL,
role TEXT NOT NULL,
content JSONB NOT NULL,
author_id TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_new_chat_messages_thread_id ON new_chat_messages(thread_id);
CREATE INDEX IF NOT EXISTS idx_new_chat_messages_created_at ON new_chat_messages(created_at);
`);
const electricUrl = getElectricUrl();
// STEP 4: Create the client wrapper
electricClient = {
db,
userId,
syncShape: async (options: SyncShapeOptions): Promise<SyncHandle> => {
const { table, where, columns, primaryKey = ["id"] } = options;
// Create cache key for this sync shape
const cacheKey = `${table}_${where || "all"}_${columns?.join(",") || "all"}`;
// Check if we already have an active sync for this shape (memory optimization)
const existingHandle = activeSyncHandles.get(cacheKey);
if (existingHandle) {
debugLog(`[Electric] Reusing existing sync handle for: ${cacheKey}`);
return existingHandle;
}
// Check if there's already a pending sync for this shape (prevent race condition)
const pendingSync = pendingSyncs.get(cacheKey);
if (pendingSync) {
debugLog(`[Electric] Waiting for pending sync to complete: ${cacheKey}`);
return pendingSync;
}
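// Illustrative example (hypothetical values): two hooks mounting at once with
// { table: "notifications", where: "user_id = 'u1'" } compute the same
// cacheKey ("notifications_user_id = 'u1'_all") - the second caller awaits
// the first's promise instead of opening a duplicate shape stream.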
// Create and track the sync promise to prevent race conditions
const syncPromise = (async (): Promise<SyncHandle> => {
// Build params for the shape request
// Electric SQL expects params as URL query parameters
const params: Record<string, string> = { table };
// Validate and fix WHERE clause to ensure string literals are properly quoted
let validatedWhere = where;
if (where) {
// Check if where uses positional parameters
if (where.includes("$1")) {
// Positional parameters ($1, $2, ...) are passed through unchanged;
// Electric resolves them on the server side
params.where = where;
validatedWhere = where;
} else {
// Validate that string literals are properly quoted
// Count single quotes - should be even (pairs) for properly quoted strings
const singleQuoteCount = (where.match(/'/g) || []).length;
if (singleQuoteCount % 2 !== 0) {
// Odd number of quotes means unterminated string literal
debugWarn("Where clause has unmatched quotes, fixing:", where);
// Add closing quote at the end
validatedWhere = `${where}'`;
params.where = validatedWhere;
} else {
// Use the where clause directly (already formatted)
params.where = where;
validatedWhere = where;
}
}
}
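// Illustrative examples (hypothetical clauses): "title = 'draft" has one
// quote and is repaired to "title = 'draft'"; "title = 'draft'" passes
// through unchanged; clauses containing "$1" skip this check entirely.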
if (columns) params.columns = columns.join(",");
debugLog("[Electric] Syncing shape with params:", params);
debugLog("[Electric] Electric URL:", `${electricUrl}/v1/shape`);
debugLog("[Electric] Where clause:", where, "Validated:", validatedWhere);
try {
// Debug: Test Electric SQL connection directly first (DEV ONLY - skipped in production)
if (process.env.NODE_ENV === "development") {
const testUrl = `${electricUrl}/v1/shape?table=${table}&offset=-1${validatedWhere ? `&where=${encodeURIComponent(validatedWhere)}` : ""}`;
debugLog("[Electric] Testing Electric SQL directly:", testUrl);
try {
const testResponse = await fetch(testUrl);
const testHeaders = {
handle: testResponse.headers.get("electric-handle"),
offset: testResponse.headers.get("electric-offset"),
upToDate: testResponse.headers.get("electric-up-to-date"),
};
debugLog("[Electric] Direct Electric SQL response headers:", testHeaders);
const testData = await testResponse.json();
debugLog(
"[Electric] Direct Electric SQL data count:",
Array.isArray(testData) ? testData.length : "not array",
testData
);
} catch (testErr) {
console.error("[Electric] Direct Electric SQL test failed:", testErr);
}
}
// Use PGlite's electric sync plugin to sync the shape
// According to Electric SQL docs, the shape config uses params for table, where, columns
// Note: mapColumns is OPTIONAL per pglite-sync types.ts
// Create a promise that resolves when initial sync is complete
// Using recommended approach: check isUpToDate immediately, watch stream, shorter timeout
// IMPORTANT: We don't unsubscribe from the stream - it must stay active for real-time updates
let syncResolved = false;
// Initialize with no-op functions to satisfy TypeScript
let resolveInitialSync: () => void = () => {};
let rejectInitialSync: (error: Error) => void = () => {};
const initialSyncPromise = new Promise<void>((resolve, reject) => {
resolveInitialSync = () => {
if (!syncResolved) {
syncResolved = true;
// DON'T unsubscribe from stream - it needs to stay active for real-time updates
resolve();
}
};
rejectInitialSync = (error: Error) => {
if (!syncResolved) {
syncResolved = true;
// DON'T unsubscribe from stream even on error - let Electric handle it
reject(error);
}
};
// Fallback timeout: never block callers on the initial sync for more than ~5s
setTimeout(() => {
if (!syncResolved) {
debugWarn(
`[Electric] ⚠️ Sync timeout for ${table} - allowing a short grace period...`
);
// The shape may not exist yet, so there is nothing to re-check here;
// give in-flight callbacks a brief grace period, then resolve anyway
setTimeout(() => {
if (!syncResolved) {
debugWarn(
`[Electric] ⚠️ Sync timeout for ${table} - resolving anyway after 5s`
);
resolveInitialSync();
}
}, 100);
}
}, 5000);
});
// ROOT CAUSE FIX: The duplicate key errors were caused by unstable cutoff dates
// in use-inbox.ts generating different sync keys on each render.
// That's now fixed (rounded to midnight UTC in getSyncCutoffDate).
// We can safely use shapeKey for fast incremental sync.
const shapeKey = `${userId}_v${SYNC_VERSION}_${table}_${where?.replace(/[^a-zA-Z0-9]/g, "_") || "all"}`;
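// Illustrative result (hypothetical values, assuming SYNC_VERSION = 3): for
// user "u1" syncing table "notifications" with where "user_id = 'u1'", the
// key is "u1_v3_notifications_user_id____u1_" - stable across renders, so
// pglite-sync can resume from its stored offset instead of re-snapshotting.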
// Type assertion to PGlite with electric extension
const pgWithElectric = db as unknown as {
electric: {
syncShapeToTable: (
config: Record<string, unknown>
) => Promise<{ unsubscribe: () => void; isUpToDate: boolean; stream: unknown }>;
};
};
const shapeConfig = {
shape: {
url: `${electricUrl}/v1/shape`,
params: {
table,
...(validatedWhere ? { where: validatedWhere } : {}),
...(columns ? { columns: columns.join(",") } : {}),
},
},
table,
primaryKey,
shapeKey, // Re-enabled for fast incremental sync (root cause in use-inbox.ts is fixed)
onInitialSync: () => {
debugLog(
`[Electric] ✅ Initial sync complete for ${table} - data should now be in PGlite`
);
resolveInitialSync();
},
onError: (error: Error) => {
console.error(`[Electric] ❌ Shape sync error for ${table}:`, error);
console.error(
"[Electric] Error details:",
JSON.stringify(error, Object.getOwnPropertyNames(error))
);
rejectInitialSync(error);
},
// Handle must-refetch: clear table data before Electric re-inserts from scratch
// This prevents "duplicate key" errors when the shape is invalidated
onMustRefetch: async (tx: Transaction) => {
debugLog(
`[Electric] ⚠️ Must refetch triggered for ${table} - clearing existing data`
);
try {
// Delete rows matching the shape's WHERE clause
// If no WHERE clause, delete all rows from the table
if (validatedWhere) {
// Parse the WHERE clause to build a DELETE statement
// The WHERE clause is already validated and formatted
await tx.exec(`DELETE FROM ${table} WHERE ${validatedWhere}`);
debugLog(`[Electric] 🗑️ Cleared ${table} rows matching: ${validatedWhere}`);
} else {
// No WHERE clause means we're syncing the entire table
await tx.exec(`DELETE FROM ${table}`);
debugLog(`[Electric] 🗑️ Cleared all rows from ${table}`);
}
} catch (cleanupError) {
console.error(
`[Electric] ❌ Failed to clear ${table} during must-refetch:`,
cleanupError
);
// Re-throw to let Electric handle the error
throw cleanupError;
}
},
};
debugLog("[Electric] syncShapeToTable config:", JSON.stringify(shapeConfig, null, 2));
let shape: { unsubscribe: () => void; isUpToDate: boolean; stream: unknown };
try {
shape = await pgWithElectric.electric.syncShapeToTable(shapeConfig);
} catch (syncError) {
// Handle "Already syncing" error - pglite-sync might not have fully cleaned up yet
const errorMessage =
syncError instanceof Error ? syncError.message : String(syncError);
if (errorMessage.includes("Already syncing")) {
debugWarn(
`[Electric] Already syncing ${table}, waiting for existing sync to settle...`
);
// Wait a short time for pglite-sync to settle
await new Promise((resolve) => setTimeout(resolve, 100));
// Check if an active handle now exists (another sync might have completed)
const existingHandle = activeSyncHandles.get(cacheKey);
if (existingHandle) {
debugLog(`[Electric] Found existing handle after waiting: ${cacheKey}`);
return existingHandle;
}
// Retry once after waiting
debugLog(`[Electric] Retrying sync for ${table}...`);
try {
shape = await pgWithElectric.electric.syncShapeToTable(shapeConfig);
} catch (retryError) {
const retryMessage =
retryError instanceof Error ? retryError.message : String(retryError);
if (retryMessage.includes("Already syncing")) {
// Still syncing - create a placeholder handle that indicates the table is being synced
debugWarn(`[Electric] ${table} still syncing, creating placeholder handle`);
const placeholderHandle: SyncHandle = {
unsubscribe: () => {
debugLog(`[Electric] Placeholder unsubscribe for: ${cacheKey}`);
activeSyncHandles.delete(cacheKey);
},
get isUpToDate() {
return false; // We don't know the real state
},
stream: undefined,
initialSyncPromise: Promise.resolve(), // Already syncing means data should be coming
};
activeSyncHandles.set(cacheKey, placeholderHandle);
return placeholderHandle;
}
throw retryError;
}
} else {
throw syncError;
}
}
if (!shape) {
throw new Error("syncShapeToTable returned undefined");
}
// Log the actual shape result structure
debugLog("[Electric] Shape sync result (initial):", {
hasUnsubscribe: typeof shape?.unsubscribe === "function",
isUpToDate: shape?.isUpToDate,
hasStream: !!shape?.stream,
streamType: typeof shape?.stream,
});
// Recommended Approach Step 1: Check isUpToDate immediately
if (shape.isUpToDate) {
debugLog(
`[Electric] ✅ Sync already up-to-date for ${table} (resuming from previous state)`
);
resolveInitialSync();
} else {
// Recommended Approach Step 2: Subscribe to stream and watch for "up-to-date" message
if (shape?.stream) {
const stream = shape.stream as any;
debugLog("[Electric] Shape stream details:", {
shapeHandle: stream?.shapeHandle,
lastOffset: stream?.lastOffset,
isUpToDate: stream?.isUpToDate,
error: stream?.error,
hasSubscribe: typeof stream?.subscribe === "function",
hasUnsubscribe: typeof stream?.unsubscribe === "function",
});
// Subscribe to the stream to watch for "up-to-date" control message
// NOTE: We keep this subscription active - don't unsubscribe!
// The stream is what Electric SQL uses for real-time updates
if (typeof stream?.subscribe === "function") {
debugLog(
"[Electric] Subscribing to shape stream to watch for up-to-date message..."
);
// Subscribe but don't store unsubscribe - we want it to stay active
stream.subscribe((messages: unknown[]) => {
// Continue receiving updates even after sync is resolved
if (!syncResolved) {
debugLog(
"[Electric] 🔵 Shape stream received messages:",
messages?.length || 0
);
}
// Check if any message indicates sync is complete
if (messages && messages.length > 0) {
for (const message of messages) {
const msg = message as any;
// Check for "up-to-date" control message
if (
msg?.headers?.control === "up-to-date" ||
msg?.headers?.electric_up_to_date === "true" ||
(typeof msg === "object" && "up-to-date" in msg)
) {
if (!syncResolved) {
debugLog(`[Electric] ✅ Received up-to-date message for ${table}`);
resolveInitialSync();
}
// Continue listening for real-time updates - don't return!
}
}
if (!syncResolved && messages.length > 0) {
debugLog(
"[Electric] First message:",
JSON.stringify(messages[0], null, 2)
);
}
}
// Also check stream's isUpToDate property after receiving messages
if (!syncResolved && stream?.isUpToDate) {
debugLog(`[Electric] ✅ Stream isUpToDate is true for ${table}`);
resolveInitialSync();
}
});
// Also check stream's isUpToDate property immediately
if (stream?.isUpToDate) {
debugLog(`[Electric] ✅ Stream isUpToDate is true immediately for ${table}`);
resolveInitialSync();
}
}
// Also poll isUpToDate periodically as a backup (every 200ms)
const pollInterval = setInterval(() => {
if (syncResolved) {
clearInterval(pollInterval);
return;
}
if (shape.isUpToDate || stream?.isUpToDate) {
debugLog(`[Electric] ✅ Sync completed (detected via polling) for ${table}`);
clearInterval(pollInterval);
resolveInitialSync();
}
}, 200);
// Clean up polling when the promise settles; the leading catch keeps this
// cleanup chain from surfacing an unhandled rejection (callers still
// observe failures via the handle's initialSyncPromise)
initialSyncPromise
.catch(() => {})
.finally(() => {
clearInterval(pollInterval);
});
} else {
debugWarn(
`[Electric] ⚠️ No stream available for ${table}, relying on callback and timeout`
);
}
}
// Create the sync handle with proper cleanup
const syncHandle: SyncHandle = {
unsubscribe: () => {
debugLog(`[Electric] Unsubscribing from: ${cacheKey}`);
// Remove from cache first
activeSyncHandles.delete(cacheKey);
// Then unsubscribe from the shape
if (shape && typeof shape.unsubscribe === "function") {
shape.unsubscribe();
}
},
// Use getter to always return current state
get isUpToDate() {
return shape?.isUpToDate ?? false;
},
stream: shape?.stream,
initialSyncPromise, // Expose promise so callers can wait for sync
};
// Cache the sync handle for reuse (memory optimization)
activeSyncHandles.set(cacheKey, syncHandle);
debugLog(
`[Electric] Cached sync handle for: ${cacheKey} (total cached: ${activeSyncHandles.size})`
);
return syncHandle;
} catch (error) {
console.error("[Electric] Failed to sync shape:", error);
// Check if Electric SQL server is reachable
try {
const response = await fetch(`${electricUrl}/v1/shape?table=${table}&offset=-1`, {
method: "GET",
});
debugLog(
"[Electric] Electric SQL server response:",
response.status,
response.statusText
);
if (!response.ok) {
console.error("[Electric] Electric SQL server error:", await response.text());
}
} catch (fetchError) {
console.error("[Electric] Cannot reach Electric SQL server:", fetchError);
console.error("[Electric] Make sure Electric SQL is running at:", electricUrl);
}
throw error;
}
})();
// Track the sync promise to prevent concurrent syncs for the same shape
pendingSyncs.set(cacheKey, syncPromise);
// Clean up the pending sync when done (whether success or failure)
syncPromise.finally(() => {
pendingSyncs.delete(cacheKey);
debugLog(`[Electric] Pending sync removed for: ${cacheKey}`);
});
return syncPromise;
},
};
debugLog(`[Electric] ✅ Initialized successfully for user: ${userId}`);
return electricClient;
} catch (error) {
console.error("[Electric] Failed to initialize:", error);
// Reset state on failure
electricClient = null;
currentUserId = null;
throw error;
} finally {
isInitializing = false;
}
})();
return initPromise;
}
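/**
 * Minimal usage sketch (illustrative only, not part of the public API): the
 * table, where clause, and column names below are hypothetical examples.
 */
async function exampleSyncNotifications(userId: string): Promise<unknown[]> {
	const electric = await initElectric(userId);
	const handle = await electric.syncShape({
		table: "notifications",
		where: `user_id = '${userId}'`, // pre-quoted string literal (see validation above)
		columns: ["id", "title", "read", "created_at"],
	});
	// Wait for the first full snapshot before reading locally
	await handle.initialSyncPromise;
	const result = await electric.db.query(
		"SELECT id, title, read FROM notifications ORDER BY created_at DESC"
	);
	return result.rows;
}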
/**
* Cleanup Electric SQL - close database and reset singleton
* Called on logout (best-effort) and when switching users
*/
export async function cleanupElectric(): Promise<void> {
if (!electricClient) {
return;
}
const userIdToClean = currentUserId;
debugLog(`[Electric] Cleaning up for user: ${userIdToClean}`);
// Unsubscribe from all active sync handles first (memory cleanup)
debugLog(`[Electric] Unsubscribing from ${activeSyncHandles.size} active sync handles`);
// Copy keys to array to avoid mutation during iteration
const handleKeys = Array.from(activeSyncHandles.keys());
for (const key of handleKeys) {
const handle = activeSyncHandles.get(key);
if (handle) {
try {
handle.unsubscribe();
} catch (err) {
debugWarn(`[Electric] Failed to unsubscribe from ${key}:`, err);
}
}
}
// Ensure caches are empty
activeSyncHandles.clear();
pendingSyncs.clear();
try {
// Close the PGlite database connection
await electricClient.db.close();
debugLog("[Electric] Database closed");
} catch (error) {
console.error("[Electric] Error closing database:", error);
}
// Reset singleton state
electricClient = null;
currentUserId = null;
isInitializing = false;
initPromise = null;
// Delete the user's IndexedDB database (best-effort cleanup on logout)
if (typeof window !== "undefined" && window.indexedDB && userIdToClean) {
try {
const dbName = `${DB_PREFIX}${userIdToClean}-v${SYNC_VERSION}`;
window.indexedDB.deleteDatabase(dbName);
debugLog(`[Electric] Deleted database: ${dbName}`);
} catch (err) {
debugWarn("[Electric] Failed to delete database:", err);
}
}
debugLog("[Electric] Cleanup complete");
}
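// Sketch of the intended call site (hypothetical logout handler): cleanup is
// best-effort and must never block sign-out.
async function exampleOnLogout(): Promise<void> {
	try {
		await cleanupElectric(); // closes PGlite and deletes the per-user IndexedDB store
	} catch (err) {
		debugWarn("[Electric] Cleanup during logout failed (ignored):", err);
	}
}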
/**
* Get the Electric client (throws if not initialized)
*/
export function getElectric(): ElectricClient {
if (!electricClient) {
throw new Error("Electric not initialized. Call initElectric(userId) first.");
}
return electricClient;
}
/**
* Check if Electric is initialized for a specific user
*/
export function isElectricInitialized(userId?: string): boolean {
if (!electricClient) return false;
if (userId && currentUserId !== userId) return false;
return true;
}
/**
* Get the current user ID that Electric is initialized for
*/
export function getCurrentElectricUserId(): string | null {
return currentUserId;
}
/**
* Get the PGlite database instance
*/
export function getDb(): PGlite | null {
return electricClient?.db ?? null;
}
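// Illustrative guard pattern combining the helpers above (hypothetical caller):
// after the readiness check, getElectric() is guaranteed not to throw.
function exampleDbIfReady(userId: string): PGlite | null {
	return isElectricInitialized(userId) ? getElectric().db : null;
}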

View file

@@ -1,36 +0,0 @@
"use client";
import { createContext, useContext } from "react";
import type { ElectricClient } from "./client";
/**
* Context for sharing the Electric SQL client across the app
*
* This ensures:
* 1. Single initialization point (ElectricProvider only)
* 2. No race conditions (hooks wait for context)
* 3. Clean cleanup (ElectricProvider manages lifecycle)
*/
export const ElectricContext = createContext<ElectricClient | null>(null);
/**
* Hook to get the Electric client from context
* Returns null if Electric is not initialized yet
*/
export function useElectricClient(): ElectricClient | null {
return useContext(ElectricContext);
}
/**
* Hook to get the Electric client, throwing if not available
* Use this when you're sure Electric should be initialized
*/
export function useElectricClientOrThrow(): ElectricClient {
const client = useContext(ElectricContext);
if (!client) {
throw new Error(
"Electric client not available. Make sure you're inside ElectricProvider and user is authenticated."
);
}
return client;
}
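/**
 * Minimal consumer sketch (illustrative): a derived hook built on
 * useElectricClient, kept JSX-free so components can simply branch on it.
 */
export function useIsElectricReady(): boolean {
	return useElectricClient() !== null;
}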

View file

@@ -26,10 +26,6 @@
"@assistant-ui/react": "^0.12.19",
"@assistant-ui/react-markdown": "^0.12.6",
"@babel/standalone": "^7.29.2",
"@electric-sql/client": "^1.4.0",
"@electric-sql/pglite": "^0.3.14",
"@electric-sql/pglite-sync": "^0.4.0",
"@electric-sql/react": "^1.0.26",
"@hookform/resolvers": "^5.2.2",
"@number-flow/react": "^0.5.10",
"@platejs/autoformat": "^52.0.11",
@@ -75,6 +71,7 @@
"@remotion/media": "^4.0.438",
"@remotion/player": "^4.0.438",
"@remotion/web-renderer": "^4.0.438",
"@rocicorp/zero": "^0.26.2",
"@slate-serializers/html": "^2.2.3",
"@streamdown/code": "^1.0.2",
"@streamdown/math": "^1.0.2",

File diff suppressed because it is too large

surfsense_web/types/zero.d.ts (vendored, new file)
View file

@@ -0,0 +1,14 @@
import type { Schema } from "@/zero/schema/index";
export type Context =
| {
userId: string;
}
| undefined;
declare module "@rocicorp/zero" {
interface DefaultTypes {
context: Context;
schema: Schema;
}
}

View file

@@ -0,0 +1,21 @@
import { defineQuery } from "@rocicorp/zero";
import { z } from "zod";
import { zql } from "../schema/index";
export const messageQueries = {
byThread: defineQuery(z.object({ threadId: z.number() }), ({ args: { threadId } }) =>
zql.new_chat_messages.where("threadId", threadId).orderBy("createdAt", "asc")
),
};
export const commentQueries = {
byThread: defineQuery(z.object({ threadId: z.number() }), ({ args: { threadId } }) =>
zql.chat_comments.where("threadId", threadId).orderBy("createdAt", "asc")
),
};
export const chatSessionQueries = {
byThread: defineQuery(z.object({ threadId: z.number() }), ({ args: { threadId } }) =>
zql.chat_session_state.where("threadId", threadId).one()
),
};
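// Hedged consumer sketch (illustrative hook name): assumes Zero's React
// binding `useQuery` from "@rocicorp/zero/react", which returns a
// [rows, details] tuple, and that a defineQuery result is called with its
// zod-validated args object.
import { useQuery } from "@rocicorp/zero/react";
export function useThreadMessages(threadId: number) {
	// Rows arrive ordered by createdAt asc, matching byThread above
	const [messages] = useQuery(messageQueries.byThread({ threadId }));
	return messages;
}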

View file

@@ -0,0 +1,15 @@
import { defineQuery } from "@rocicorp/zero";
import { z } from "zod";
import { zql } from "../schema/index";
export const documentQueries = {
bySpace: defineQuery(z.object({ searchSpaceId: z.number() }), ({ args: { searchSpaceId } }) =>
zql.documents.where("searchSpaceId", searchSpaceId).orderBy("createdAt", "desc")
),
};
export const connectorQueries = {
bySpace: defineQuery(z.object({ searchSpaceId: z.number() }), ({ args: { searchSpaceId } }) =>
zql.search_source_connectors.where("searchSpaceId", searchSpaceId).orderBy("createdAt", "desc")
),
};

View file

@@ -0,0 +1,9 @@
import { defineQuery } from "@rocicorp/zero";
import { z } from "zod";
import { zql } from "../schema/index";
export const notificationQueries = {
byUser: defineQuery(z.object({ userId: z.string() }), ({ args: { userId } }) =>
zql.notifications.where("userId", userId).orderBy("createdAt", "desc")
),
};

View file

@@ -0,0 +1,13 @@
import { defineQueries } from "@rocicorp/zero";
import { chatSessionQueries, commentQueries, messageQueries } from "./chat";
import { connectorQueries, documentQueries } from "./documents";
import { notificationQueries } from "./inbox";
export const queries = defineQueries({
notifications: notificationQueries,
documents: documentQueries,
connectors: connectorQueries,
messages: messageQueries,
comments: commentQueries,
chatSession: chatSessionQueries,
});

View file

@@ -0,0 +1,34 @@
import { json, number, string, table } from "@rocicorp/zero";
export const newChatMessageTable = table("new_chat_messages")
.columns({
id: number(),
role: string(),
content: json(),
threadId: number().from("thread_id"),
authorId: string().optional().from("author_id"),
createdAt: number().from("created_at"),
})
.primaryKey("id");
export const chatCommentTable = table("chat_comments")
.columns({
id: number(),
messageId: number().from("message_id"),
threadId: number().from("thread_id"),
parentId: number().optional().from("parent_id"),
authorId: string().optional().from("author_id"),
content: string(),
createdAt: number().from("created_at"),
updatedAt: number().from("updated_at"),
})
.primaryKey("id");
export const chatSessionStateTable = table("chat_session_state")
.columns({
id: number(),
threadId: number().from("thread_id"),
aiRespondingToUserId: string().optional().from("ai_responding_to_user_id"),
updatedAt: number().from("updated_at"),
})
.primaryKey("id");
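// Note on naming (illustrative id value): `.from("thread_id")` maps each
// snake_case Postgres column to a camelCase client field, so ZQL filters use
// the client names, e.g. zql.chat_comments.where("threadId", 42).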

View file

@@ -0,0 +1,31 @@
import { boolean, json, number, string, table } from "@rocicorp/zero";
export const documentTable = table("documents")
.columns({
id: number(),
title: string(),
documentType: string().from("document_type"),
searchSpaceId: number().from("search_space_id"),
createdById: string().optional().from("created_by_id"),
status: json(),
createdAt: number().from("created_at"),
})
.primaryKey("id");
export const searchSourceConnectorTable = table("search_source_connectors")
.columns({
id: number(),
name: string(),
connectorType: string().from("connector_type"),
isIndexable: boolean().from("is_indexable"),
lastIndexedAt: number().optional().from("last_indexed_at"),
config: json(),
enableSummary: boolean().from("enable_summary"),
periodicIndexingEnabled: boolean().from("periodic_indexing_enabled"),
indexingFrequencyMinutes: number().optional().from("indexing_frequency_minutes"),
nextScheduledAt: number().optional().from("next_scheduled_at"),
searchSpaceId: number().from("search_space_id"),
userId: string().from("user_id"),
createdAt: number().from("created_at"),
})
.primaryKey("id");

View file

@@ -0,0 +1,16 @@
import { boolean, json, number, string, table } from "@rocicorp/zero";
export const notificationTable = table("notifications")
.columns({
id: number(),
userId: string().from("user_id"),
searchSpaceId: number().optional().from("search_space_id"),
type: string(),
title: string(),
message: string(),
read: boolean(),
metadata: json().optional(),
createdAt: number().from("created_at"),
updatedAt: number().optional().from("updated_at"),
})
.primaryKey("id");

View file

@@ -0,0 +1,41 @@
import { createBuilder, createSchema, relationships } from "@rocicorp/zero";
import { chatCommentTable, chatSessionStateTable, newChatMessageTable } from "./chat";
import { documentTable, searchSourceConnectorTable } from "./documents";
import { notificationTable } from "./inbox";
const chatCommentRelationships = relationships(chatCommentTable, ({ one }) => ({
message: one({
sourceField: ["messageId"],
destSchema: newChatMessageTable,
destField: ["id"],
}),
parent: one({
sourceField: ["parentId"],
destSchema: chatCommentTable,
destField: ["id"],
}),
}));
const newChatMessageRelationships = relationships(newChatMessageTable, ({ many }) => ({
comments: many({
sourceField: ["id"],
destSchema: chatCommentTable,
destField: ["messageId"],
}),
}));
export const schema = createSchema({
tables: [
notificationTable,
documentTable,
searchSourceConnectorTable,
newChatMessageTable,
chatCommentTable,
chatSessionStateTable,
],
relationships: [chatCommentRelationships, newChatMessageRelationships],
});
export type Schema = typeof schema;
export const zql = createBuilder(schema);
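// Sketch (illustrative thread id): the relationships above enable nested
// reads, e.g. a thread's messages together with their comments:
//   zql.new_chat_messages
//     .where("threadId", 42)
//     .related("comments", (q) => q.orderBy("createdAt", "asc"));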