diff --git a/README.md b/README.md
index 3b4ed36..916864b 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@
 # SurfSense
 
-While tools like NotebookLM and Perplexity are impressive and highly effective for conducting research on any topic/query, SurfSense elevates this capability by integrating with your personal knowledge base. It is a highly customizable AI research agent, connected to external sources such as search engines (Tavily, LinkUp), Slack, Linear, Notion, YouTube, GitHub, Discord and more to come.
+While tools like NotebookLM and Perplexity are impressive and highly effective for conducting research on any topic/query, SurfSense elevates this capability by integrating with your personal knowledge base. It is a highly customizable AI research agent, connected to external sources such as search engines (Tavily, LinkUp), Slack, Linear, Jira, Notion, YouTube, GitHub, Discord and more to come.
 
 MODSetter%2FSurfSense | Trendshift
@@ -63,6 +63,7 @@ Open source and easy to deploy locally.
 - Search Engines (Tavily, LinkUp)
 - Slack
 - Linear
+- Jira
 - Notion
 - Youtube Videos
 - GitHub
diff --git a/surfsense_backend/alembic/versions/11_add_llm_config_table_and_relationships.py b/surfsense_backend/alembic/versions/11_add_llm_config_table_and_relationships.py
index f807f8b..0a6a107 100644
--- a/surfsense_backend/alembic/versions/11_add_llm_config_table_and_relationships.py
+++ b/surfsense_backend/alembic/versions/11_add_llm_config_table_and_relationships.py
@@ -20,70 +20,119 @@ depends_on: str | Sequence[str] | None = None
 
 def upgrade() -> None:
     """Upgrade schema - add LiteLLMProvider enum, LLMConfig table and user LLM preferences."""
-    # Check if enum type exists and create if it doesn't
-    op.execute("""
+    # Create enum only if not exists
+    op.execute(
+        """
         DO $$
         BEGIN
             IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'litellmprovider') THEN
-                CREATE TYPE litellmprovider AS ENUM ('OPENAI', 'ANTHROPIC', 'GROQ', 'COHERE', 'HUGGINGFACE', 'AZURE_OPENAI', 'GOOGLE', 'AWS_BEDROCK', 'OLLAMA', 'MISTRAL', 'TOGETHER_AI', 'REPLICATE', 'PALM', 'VERTEX_AI', 'ANYSCALE', 'PERPLEXITY', 'DEEPINFRA', 'AI21', 'NLPCLOUD', 'ALEPH_ALPHA', 'PETALS', 'CUSTOM');
+                CREATE TYPE litellmprovider AS ENUM (
+                    'OPENAI', 'ANTHROPIC', 'GROQ', 'COHERE', 'HUGGINGFACE',
+                    'AZURE_OPENAI', 'GOOGLE', 'AWS_BEDROCK', 'OLLAMA', 'MISTRAL',
+                    'TOGETHER_AI', 'REPLICATE', 'PALM', 'VERTEX_AI', 'ANYSCALE',
+                    'PERPLEXITY', 'DEEPINFRA', 'AI21', 'NLPCLOUD', 'ALEPH_ALPHA',
+                    'PETALS', 'CUSTOM'
+                );
             END IF;
         END$$;
-    """)
-
-    # Create llm_configs table using raw SQL to avoid enum creation conflicts
-    op.execute("""
-        CREATE TABLE llm_configs (
-            id SERIAL PRIMARY KEY,
-            created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
-            name VARCHAR(100) NOT NULL,
-            provider litellmprovider NOT NULL,
-            custom_provider VARCHAR(100),
-            model_name VARCHAR(100) NOT NULL,
-            api_key TEXT NOT NULL,
-            api_base VARCHAR(500),
-            litellm_params JSONB,
-            user_id UUID NOT NULL REFERENCES "user"(id) ON DELETE CASCADE
-        )
-    """)
-
-    # Create indexes
-    op.create_index(op.f("ix_llm_configs_id"), "llm_configs", ["id"], unique=False)
-    op.create_index(
-        op.f("ix_llm_configs_created_at"), "llm_configs", ["created_at"], unique=False
+        """
     )
-    op.create_index(op.f("ix_llm_configs_name"), "llm_configs", ["name"], unique=False)
 
-    # Add LLM preference columns to user table
-    op.add_column("user", sa.Column("long_context_llm_id", sa.Integer(), nullable=True))
-    op.add_column("user", sa.Column("fast_llm_id", sa.Integer(), nullable=True))
-    op.add_column("user", sa.Column("strategic_llm_id", sa.Integer(), nullable=True))
+    # Create llm_configs table only if it doesn't already exist
+    op.execute(
+        """
+        DO $$
+        BEGIN
+            IF NOT EXISTS (
+                SELECT FROM information_schema.tables
+                WHERE table_name = 'llm_configs'
+            ) THEN
+                CREATE TABLE llm_configs (
+                    id SERIAL PRIMARY KEY,
+                    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
+                    name VARCHAR(100) NOT NULL,
+                    provider litellmprovider NOT NULL,
+                    custom_provider VARCHAR(100),
+                    model_name VARCHAR(100) NOT NULL,
+                    api_key TEXT NOT NULL,
+                    api_base VARCHAR(500),
+                    litellm_params JSONB,
+                    user_id UUID NOT NULL REFERENCES "user"(id) ON DELETE CASCADE
+                );
+            END IF;
+        END$$;
+        """
+    )
 
-    # Create foreign key constraints for LLM preferences
-    op.create_foreign_key(
-        op.f("fk_user_long_context_llm_id_llm_configs"),
-        "user",
-        "llm_configs",
-        ["long_context_llm_id"],
-        ["id"],
-        ondelete="SET NULL",
-    )
-    op.create_foreign_key(
-        op.f("fk_user_fast_llm_id_llm_configs"),
-        "user",
-        "llm_configs",
-        ["fast_llm_id"],
-        ["id"],
-        ondelete="SET NULL",
-    )
-    op.create_foreign_key(
-        op.f("fk_user_strategic_llm_id_llm_configs"),
-        "user",
-        "llm_configs",
-        ["strategic_llm_id"],
-        ["id"],
-        ondelete="SET NULL",
+    # Create indexes if they don't exist
+    op.execute(
+        """
+        DO $$
+        BEGIN
+            IF NOT EXISTS (
+                SELECT 1 FROM pg_indexes
+                WHERE tablename = 'llm_configs' AND indexname = 'ix_llm_configs_id'
+            ) THEN
+                CREATE INDEX ix_llm_configs_id ON llm_configs(id);
+            END IF;
+
+            IF NOT EXISTS (
+                SELECT 1 FROM pg_indexes
+                WHERE tablename = 'llm_configs' AND indexname = 'ix_llm_configs_created_at'
+            ) THEN
+                CREATE INDEX ix_llm_configs_created_at ON llm_configs(created_at);
+            END IF;
+
+            IF NOT EXISTS (
+                SELECT 1 FROM pg_indexes
+                WHERE tablename = 'llm_configs' AND indexname = 'ix_llm_configs_name'
+            ) THEN
+                CREATE INDEX ix_llm_configs_name ON llm_configs(name);
+            END IF;
+        END$$;
+        """
     )
 
+    # Safely add columns to user table
+    bind = op.get_bind()
+    inspector = sa.inspect(bind)
+    existing_columns = [col["name"] for col in inspector.get_columns("user")]
+
+    with op.batch_alter_table("user") as batch_op:
+        if "long_context_llm_id" not in existing_columns:
+            batch_op.add_column(
+                sa.Column("long_context_llm_id", sa.Integer(), nullable=True)
+            )
+            batch_op.create_foreign_key(
+                op.f("fk_user_long_context_llm_id_llm_configs"),
+                "llm_configs",
+                ["long_context_llm_id"],
+                ["id"],
+                ondelete="SET NULL",
+            )
+
+        if "fast_llm_id" not in existing_columns:
+            batch_op.add_column(sa.Column("fast_llm_id", sa.Integer(), nullable=True))
+            batch_op.create_foreign_key(
+                op.f("fk_user_fast_llm_id_llm_configs"),
+                "llm_configs",
+                ["fast_llm_id"],
+                ["id"],
+                ondelete="SET NULL",
+            )
+
+        if "strategic_llm_id" not in existing_columns:
+            batch_op.add_column(
+                sa.Column("strategic_llm_id", sa.Integer(), nullable=True)
+            )
+            batch_op.create_foreign_key(
+                op.f("fk_user_strategic_llm_id_llm_configs"),
+                "llm_configs",
+                ["strategic_llm_id"],
+                ["id"],
+                ondelete="SET NULL",
+            )
+
 
 def downgrade() -> None:
     """Downgrade schema - remove LLMConfig table and user LLM preferences."""
diff --git a/surfsense_backend/alembic/versions/12_add_logs_table.py b/surfsense_backend/alembic/versions/12_add_logs_table.py
index 9e12fe6..947c77c 100644
--- a/surfsense_backend/alembic/versions/12_add_logs_table.py
+++ b/surfsense_backend/alembic/versions/12_add_logs_table.py
@@ -6,6 +6,8 @@ Revises: 11
 
 from collections.abc import Sequence
 
+from sqlalchemy import inspect
+
 from alembic import op
 
 # revision identifiers, used by Alembic.
@@ -18,47 +20,73 @@ depends_on: str | Sequence[str] | None = None
 
 def upgrade() -> None:
     """Upgrade schema - add LogLevel and LogStatus enums and logs table."""
-    # Create LogLevel enum
-    op.execute("""
-        CREATE TYPE loglevel AS ENUM ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
-    """)
+    # Create LogLevel enum if it doesn't exist
+    op.execute(
+        """
+        DO $$
+        BEGIN
+            IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'loglevel') THEN
+                CREATE TYPE loglevel AS ENUM ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL');
+            END IF;
+        END$$;
+        """
+    )
 
-    # Create LogStatus enum
-    op.execute("""
-        CREATE TYPE logstatus AS ENUM ('IN_PROGRESS', 'SUCCESS', 'FAILED')
-    """)
+    # Create LogStatus enum if it doesn't exist
+    op.execute(
+        """
+        DO $$
+        BEGIN
+            IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'logstatus') THEN
+                CREATE TYPE logstatus AS ENUM ('IN_PROGRESS', 'SUCCESS', 'FAILED');
+            END IF;
+        END$$;
+        """
+    )
 
-    # Create logs table
-    op.execute("""
-        CREATE TABLE logs (
+    # Create logs table if it doesn't exist
+    op.execute(
+        """
+        CREATE TABLE IF NOT EXISTS logs (
             id SERIAL PRIMARY KEY,
-            created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
+            created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
             level loglevel NOT NULL,
             status logstatus NOT NULL,
             message TEXT NOT NULL,
             source VARCHAR(200),
             log_metadata JSONB DEFAULT '{}',
             search_space_id INTEGER NOT NULL REFERENCES searchspaces(id) ON DELETE CASCADE
-        )
-    """)
+        );
+        """
+    )
 
-    # Create indexes
-    op.create_index(op.f("ix_logs_id"), "logs", ["id"], unique=False)
-    op.create_index(op.f("ix_logs_created_at"), "logs", ["created_at"], unique=False)
-    op.create_index(op.f("ix_logs_level"), "logs", ["level"], unique=False)
-    op.create_index(op.f("ix_logs_status"), "logs", ["status"], unique=False)
-    op.create_index(op.f("ix_logs_source"), "logs", ["source"], unique=False)
+    # Get existing indexes
+    conn = op.get_bind()
+    inspector = inspect(conn)
+    existing_indexes = [idx["name"] for idx in inspector.get_indexes("logs")]
+
+    # Create indexes only if they don't already exist
+    if "ix_logs_id" not in existing_indexes:
+        op.create_index("ix_logs_id", "logs", ["id"])
+    if "ix_logs_created_at" not in existing_indexes:
+        op.create_index("ix_logs_created_at", "logs", ["created_at"])
+    if "ix_logs_level" not in existing_indexes:
+        op.create_index("ix_logs_level", "logs", ["level"])
+    if "ix_logs_status" not in existing_indexes:
+        op.create_index("ix_logs_status", "logs", ["status"])
+    if "ix_logs_source" not in existing_indexes:
+        op.create_index("ix_logs_source", "logs", ["source"])
 
 
 def downgrade() -> None:
     """Downgrade schema - remove logs table and enums."""
     # Drop indexes
-    op.drop_index(op.f("ix_logs_source"), table_name="logs")
-    op.drop_index(op.f("ix_logs_status"), table_name="logs")
-    op.drop_index(op.f("ix_logs_level"), table_name="logs")
-    op.drop_index(op.f("ix_logs_created_at"), table_name="logs")
-    op.drop_index(op.f("ix_logs_id"), table_name="logs")
+    op.drop_index("ix_logs_source", table_name="logs")
+    op.drop_index("ix_logs_status", table_name="logs")
+    op.drop_index("ix_logs_level", table_name="logs")
+    op.drop_index("ix_logs_created_at", table_name="logs")
+    op.drop_index("ix_logs_id", table_name="logs")
 
     # Drop logs table
     op.drop_table("logs")
diff --git a/surfsense_backend/alembic/versions/13_add_jira_connector_enums.py b/surfsense_backend/alembic/versions/13_add_jira_connector_enums.py
new file mode 100644
index 0000000..18930b4
--- /dev/null
+++ b/surfsense_backend/alembic/versions/13_add_jira_connector_enums.py
@@ -0,0 +1,61 @@
+"""Add JIRA_CONNECTOR to enums
+
+Revision ID: 13
+Revises: 12
+"""
+
+from collections.abc import Sequence
+
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "13"
+down_revision: str | None = "12"
+branch_labels: str | Sequence[str] | None = None
+depends_on: str | Sequence[str] | None = None
+
+
+def upgrade() -> None:
+    """Safely add 'JIRA_CONNECTOR' to enum types if missing."""
+
+    # Add to searchsourceconnectortype enum
+    op.execute(
+        """
+        DO $$
+        BEGIN
+            IF NOT EXISTS (
+                SELECT 1 FROM pg_type t
+                JOIN pg_enum e ON t.oid = e.enumtypid
+                WHERE t.typname = 'searchsourceconnectortype' AND e.enumlabel = 'JIRA_CONNECTOR'
+            ) THEN
+                ALTER TYPE searchsourceconnectortype ADD VALUE 'JIRA_CONNECTOR';
+            END IF;
+        END
+        $$;
+        """
+    )
+
+    # Add to documenttype enum
+    op.execute(
+        """
+        DO $$
+        BEGIN
+            IF NOT EXISTS (
+                SELECT 1 FROM pg_type t
+                JOIN pg_enum e ON t.oid = e.enumtypid
+                WHERE t.typname = 'documenttype' AND e.enumlabel = 'JIRA_CONNECTOR'
+            ) THEN
+                ALTER TYPE documenttype ADD VALUE 'JIRA_CONNECTOR';
+            END IF;
+        END
+        $$;
+        """
+    )
+
+
+def downgrade() -> None:
+    """
+    Downgrade logic not implemented since PostgreSQL
+    does not support removing enum values.
+    """
+    pass
diff --git a/surfsense_backend/alembic/versions/1_add_github_connector_enum.py b/surfsense_backend/alembic/versions/1_add_github_connector_enum.py
index e54418c..235908b 100644
--- a/surfsense_backend/alembic/versions/1_add_github_connector_enum.py
+++ b/surfsense_backend/alembic/versions/1_add_github_connector_enum.py
@@ -25,7 +25,23 @@ def upgrade() -> None:
 
     # Manually add the command to add the enum value
     # Note: It's generally better to let autogenerate handle this, but we're bypassing it
-    op.execute("ALTER TYPE searchsourceconnectortype ADD VALUE 'GITHUB_CONNECTOR'")
+    op.execute(
+        """
+DO $$
+BEGIN
+    IF NOT EXISTS (
+        SELECT 1
+        FROM pg_enum
+        WHERE enumlabel = 'GITHUB_CONNECTOR'
+        AND enumtypid = (
+            SELECT oid FROM pg_type WHERE typname = 'searchsourceconnectortype'
+        )
+    ) THEN
+        ALTER TYPE searchsourceconnectortype ADD VALUE 'GITHUB_CONNECTOR';
+    END IF;
+END$$;
+"""
+    )
 
     # Pass for the rest, as autogenerate didn't run to add other schema details
     pass
diff --git a/surfsense_backend/alembic/versions/2_add_linear_connector_enum.py b/surfsense_backend/alembic/versions/2_add_linear_connector_enum.py
index ffe6293..b044556 100644
--- a/surfsense_backend/alembic/versions/2_add_linear_connector_enum.py
+++ b/surfsense_backend/alembic/versions/2_add_linear_connector_enum.py
@@ -17,14 +17,25 @@ depends_on: str | Sequence[str] | None = None
 
 def upgrade() -> None:
-    # ### commands auto generated by Alembic - please adjust! ###
+    op.execute(
+        """
+        DO $$
+        BEGIN
+            IF NOT EXISTS (
+                SELECT 1 FROM pg_enum
+                WHERE enumlabel = 'LINEAR_CONNECTOR'
+                AND enumtypid = (
+                    SELECT oid FROM pg_type WHERE typname = 'searchsourceconnectortype'
+                )
+            ) THEN
+                ALTER TYPE searchsourceconnectortype ADD VALUE 'LINEAR_CONNECTOR';
+            END IF;
+        END$$;
+        """
+    )
 
-    # Manually add the command to add the enum value
-    op.execute("ALTER TYPE searchsourceconnectortype ADD VALUE 'LINEAR_CONNECTOR'")
-    # Pass for the rest, as autogenerate didn't run to add other schema details
-    pass
-    # ### end Alembic commands ###
+
+# def downgrade() -> None:
diff --git a/surfsense_backend/alembic/versions/3_add_linear_connector_to_documenttype_.py b/surfsense_backend/alembic/versions/3_add_linear_connector_to_documenttype_.py
index 8c4625b..001cac3 100644
--- a/surfsense_backend/alembic/versions/3_add_linear_connector_to_documenttype_.py
+++ b/surfsense_backend/alembic/versions/3_add_linear_connector_to_documenttype_.py
@@ -22,7 +22,22 @@ NEW_VALUE = "LINEAR_CONNECTOR"
 
 def upgrade() -> None:
     """Upgrade schema."""
-    op.execute(f"ALTER TYPE {ENUM_NAME} ADD VALUE '{NEW_VALUE}'")
+    op.execute(
+        f"""
+        DO $$
+        BEGIN
+            IF NOT EXISTS (
+                SELECT 1 FROM pg_enum
+                WHERE enumlabel = '{NEW_VALUE}'
+                AND enumtypid = (
+                    SELECT oid FROM pg_type WHERE typname = '{ENUM_NAME}'
+                )
+            ) THEN
+                ALTER TYPE {ENUM_NAME} ADD VALUE '{NEW_VALUE}';
+            END IF;
+        END$$;
+        """
+    )
 
 
 # Warning: This will delete all rows with the new value
diff --git a/surfsense_backend/alembic/versions/6_change_podcast_content_to_transcript.py b/surfsense_backend/alembic/versions/6_change_podcast_content_to_transcript.py
index 8a23e86..54c265d 100644
--- a/surfsense_backend/alembic/versions/6_change_podcast_content_to_transcript.py
+++ b/surfsense_backend/alembic/versions/6_change_podcast_content_to_transcript.py
@@ -8,6 +8,7 @@ Revises: 5
 from collections.abc import Sequence
 
 import sqlalchemy as sa
+from sqlalchemy import inspect
 from sqlalchemy.dialects.postgresql import JSON
 
 from alembic import op
@@ -20,21 +21,28 @@ depends_on: str | Sequence[str] | None = None
 
 def upgrade() -> None:
-    # Drop the old column and create a new one with the new name and type
-    # We need to do this because PostgreSQL doesn't support direct column renames with type changes
-    op.add_column(
-        "podcasts",
-        sa.Column("podcast_transcript", JSON, nullable=False, server_default="{}"),
-    )
+    bind = op.get_bind()
+    inspector = inspect(bind)
 
-    # Copy data from old column to new column
-    # Convert text to JSON by storing it as a JSON string value
-    op.execute(
-        "UPDATE podcasts SET podcast_transcript = jsonb_build_object('text', podcast_content) WHERE podcast_content != ''"
-    )
+    columns = [col["name"] for col in inspector.get_columns("podcasts")]
+    if "podcast_transcript" not in columns:
+        op.add_column(
+            "podcasts",
+            sa.Column("podcast_transcript", JSON, nullable=False, server_default="{}"),
+        )
 
-    # Drop the old column
-    op.drop_column("podcasts", "podcast_content")
+        # Copy data from old column to new column
+        op.execute(
+            """
+            UPDATE podcasts
+            SET podcast_transcript = jsonb_build_object('text', podcast_content)
+            WHERE podcast_content != ''
+            """
+        )
+
+        # Drop the old column only if it exists
+        if "podcast_content" in columns:
+            op.drop_column("podcasts", "podcast_content")
 
 
 def downgrade() -> None:
diff --git a/surfsense_backend/alembic/versions/7_remove_is_generated_column.py b/surfsense_backend/alembic/versions/7_remove_is_generated_column.py
index 0416944..17238c3 100644
--- a/surfsense_backend/alembic/versions/7_remove_is_generated_column.py
+++ b/surfsense_backend/alembic/versions/7_remove_is_generated_column.py
@@ -8,6 +8,7 @@ Revises: 6
 from collections.abc import Sequence
 
 import sqlalchemy as sa
+from sqlalchemy import inspect
 
 from alembic import op
 
@@ -19,8 +20,14 @@ depends_on: str | Sequence[str] | None = None
 
 def upgrade() -> None:
-    # Drop the is_generated column
-    op.drop_column("podcasts", "is_generated")
+    # Get the current database connection
+    bind = op.get_bind()
+    inspector = inspect(bind)
+
+    # Check if the column exists before attempting to drop it
+    columns = [col["name"] for col in inspector.get_columns("podcasts")]
+    if "is_generated" in columns:
+        op.drop_column("podcasts", "is_generated")
 
 
 def downgrade() -> None:
diff --git a/surfsense_backend/alembic/versions/8_add_content_hash_to_documents.py b/surfsense_backend/alembic/versions/8_add_content_hash_to_documents.py
index 10f68d4..6fa65a8 100644
--- a/surfsense_backend/alembic/versions/8_add_content_hash_to_documents.py
+++ b/surfsense_backend/alembic/versions/8_add_content_hash_to_documents.py
@@ -7,6 +7,7 @@ Revises: 7
 from collections.abc import Sequence
 
 import sqlalchemy as sa
+from sqlalchemy import inspect
 
 from alembic import op
 
@@ -18,44 +19,53 @@ depends_on: str | Sequence[str] | None = None
 
 def upgrade() -> None:
-    # Add content_hash column as nullable first to handle existing data
-    op.add_column("documents", sa.Column("content_hash", sa.String(), nullable=True))
+    bind = op.get_bind()
+    inspector = inspect(bind)
+    columns = [col["name"] for col in inspector.get_columns("documents")]
 
-    # Update existing documents to generate content hashes
-    # Using SHA-256 hash of the content column with proper UTF-8 encoding
-    op.execute("""
-        UPDATE documents
-        SET content_hash = encode(sha256(convert_to(content, 'UTF8')), 'hex')
-        WHERE content_hash IS NULL
-    """)
-
-    # Handle duplicate content hashes by keeping only the oldest document for each hash
-    # Delete newer documents with duplicate content hashes
-    op.execute("""
-        DELETE FROM documents
-        WHERE id NOT IN (
-            SELECT MIN(id)
-            FROM documents
-            GROUP BY content_hash
+    # Only add the column if it doesn't already exist
+    if "content_hash" not in columns:
+        op.add_column(
+            "documents", sa.Column("content_hash", sa.String(), nullable=True)
         )
-    """)
 
-    # Now alter the column to match the model: nullable=False, index=True, unique=True
-    op.alter_column(
-        "documents", "content_hash", existing_type=sa.String(), nullable=False
-    )
-    op.create_index(
-        op.f("ix_documents_content_hash"), "documents", ["content_hash"], unique=False
-    )
-    op.create_unique_constraint(
-        op.f("uq_documents_content_hash"), "documents", ["content_hash"]
-    )
+        # Populate the content_hash column
+        op.execute(
+            """
+            UPDATE documents
+            SET content_hash = encode(sha256(convert_to(content, 'UTF8')), 'hex')
+            WHERE content_hash IS NULL
+            """
+        )
+
+        op.execute(
+            """
+            DELETE FROM documents
+            WHERE id NOT IN (
+                SELECT MIN(id)
+                FROM documents
+                GROUP BY content_hash
+            )
+            """
+        )
+
+        op.alter_column(
+            "documents", "content_hash", existing_type=sa.String(), nullable=False
+        )
+        op.create_index(
+            op.f("ix_documents_content_hash"),
+            "documents",
+            ["content_hash"],
+            unique=False,
+        )
+        op.create_unique_constraint(
+            op.f("uq_documents_content_hash"), "documents", ["content_hash"]
+        )
+    else:
+        print("Column 'content_hash' already exists. Skipping column creation.")
 
 
 def downgrade() -> None:
     op.drop_constraint(op.f("uq_documents_content_hash"), "documents", type_="unique")
     op.drop_index(op.f("ix_documents_content_hash"), table_name="documents")
     op.drop_column("documents", "content_hash")
diff --git a/surfsense_backend/alembic/versions/9_add_discord_connector_enum_and_documenttype.py b/surfsense_backend/alembic/versions/9_add_discord_connector_enum_and_documenttype.py
index 8be1e39..ed6f238 100644
--- a/surfsense_backend/alembic/versions/9_add_discord_connector_enum_and_documenttype.py
+++ b/surfsense_backend/alembic/versions/9_add_discord_connector_enum_and_documenttype.py
@@ -22,11 +22,38 @@ DOCUMENT_NEW_VALUE = "DISCORD_CONNECTOR"
 
 def upgrade() -> None:
-    """Upgrade schema - add DISCORD_CONNECTOR to connector and document enum."""
-    # Add DISCORD_CONNECTOR to searchsourceconnectortype
-    op.execute(f"ALTER TYPE {CONNECTOR_ENUM} ADD VALUE '{CONNECTOR_NEW_VALUE}'")
-    # Add DISCORD_CONNECTOR to documenttype
-    op.execute(f"ALTER TYPE {DOCUMENT_ENUM} ADD VALUE '{DOCUMENT_NEW_VALUE}'")
+    """Upgrade schema - add DISCORD_CONNECTOR to connector and document enum safely."""
+    # Add DISCORD_CONNECTOR to searchsourceconnectortype only if not exists
+    op.execute(
+        f"""
+        DO $$
+        BEGIN
+            IF NOT EXISTS (
+                SELECT 1 FROM pg_enum
+                WHERE enumlabel = '{CONNECTOR_NEW_VALUE}'
+                AND enumtypid = (SELECT oid FROM pg_type WHERE typname = '{CONNECTOR_ENUM}')
+            ) THEN
+                ALTER TYPE {CONNECTOR_ENUM} ADD VALUE '{CONNECTOR_NEW_VALUE}';
+            END IF;
+        END$$;
+        """
+    )
+
+    # Add DISCORD_CONNECTOR to documenttype only if not exists
+    op.execute(
+        f"""
+        DO $$
+        BEGIN
+            IF NOT EXISTS (
+                SELECT 1 FROM pg_enum
+                WHERE enumlabel = '{DOCUMENT_NEW_VALUE}'
+                AND enumtypid = (SELECT oid FROM pg_type WHERE typname = '{DOCUMENT_ENUM}')
+            ) THEN
+                ALTER TYPE {DOCUMENT_ENUM} ADD VALUE '{DOCUMENT_NEW_VALUE}';
+            END IF;
+        END$$;
+        """
+    )
 
 
 def downgrade() -> None:
diff --git a/surfsense_backend/alembic/versions/e55302644c51_add_github_connector_to_documenttype_.py b/surfsense_backend/alembic/versions/e55302644c51_add_github_connector_to_documenttype_.py
index 3c7b3a7..0ef43f6 100644
--- a/surfsense_backend/alembic/versions/e55302644c51_add_github_connector_to_documenttype_.py
+++ b/surfsense_backend/alembic/versions/e55302644c51_add_github_connector_to_documenttype_.py
@@ -1,10 +1,3 @@
-"""Add GITHUB_CONNECTOR to DocumentType enum
-
-Revision ID: e55302644c51
-Revises: 1
-
-"""
-
 from collections.abc import Sequence
 
 from alembic import op
@@ -16,23 +9,34 @@ branch_labels: str | Sequence[str] | None = None
 depends_on: str | Sequence[str] | None = None
 
 # Define the ENUM type name and the new value
-ENUM_NAME = "documenttype"  # Make sure this matches the name in your DB (usually lowercase class name)
+ENUM_NAME = "documenttype"
 NEW_VALUE = "GITHUB_CONNECTOR"
 
 
 def upgrade() -> None:
     """Upgrade schema."""
-    op.execute(f"ALTER TYPE {ENUM_NAME} ADD VALUE '{NEW_VALUE}'")
+    op.execute(
+        f"""
+        DO $$
+        BEGIN
+            IF NOT EXISTS (
+                SELECT 1 FROM pg_enum
+                WHERE enumlabel = '{NEW_VALUE}'
+                AND enumtypid = (
+                    SELECT oid FROM pg_type WHERE typname = '{ENUM_NAME}'
+                )
+            ) THEN
+                ALTER TYPE {ENUM_NAME} ADD VALUE '{NEW_VALUE}';
+            END IF;
+        END$$;
+        """
+    )
 
 
-# Warning: This will delete all rows with the new value
 def downgrade() -> None:
     """Downgrade schema - remove GITHUB_CONNECTOR from enum."""
-
-    # The old type name
     old_enum_name = f"{ENUM_NAME}_old"
 
-    # Enum values *before* GITHUB_CONNECTOR was added
     old_values = (
         "EXTENSION",
         "CRAWLED_URL",
@@ -43,25 +47,21 @@ def downgrade() -> None:
     )
     old_values_sql = ", ".join([f"'{v}'" for v in old_values])
 
-    # Table and column names (adjust if different)
     table_name = "documents"
     column_name = "document_type"
 
-    # 1. Rename the current enum type
-    op.execute(f"ALTER TYPE {ENUM_NAME} RENAME TO {old_enum_name}")
+    # 1. Create the new enum type with the old values
+    op.execute(f"CREATE TYPE {old_enum_name} AS ENUM({old_values_sql})")
 
-    # 2. Create the new enum type with the old values
-    op.execute(f"CREATE TYPE {ENUM_NAME} AS ENUM({old_values_sql})")
-
-    # 3. Update the table:
+    # 2. Delete rows using the new value
     op.execute(f"DELETE FROM {table_name} WHERE {column_name}::text = '{NEW_VALUE}'")
 
-    # 4. Alter the column to use the new enum type (casting old values)
+    # 3. Alter the column to use the old enum type
     op.execute(
         f"ALTER TABLE {table_name} ALTER COLUMN {column_name} "
-        f"TYPE {ENUM_NAME} USING {column_name}::text::{ENUM_NAME}"
+        f"TYPE {old_enum_name} USING {column_name}::text::{old_enum_name}"
    )
 
-    # 5. Drop the old enum type
-    op.execute(f"DROP TYPE {old_enum_name}")
-    # ### end Alembic commands ###
+    # 4. Drop the current enum type and rename the old one
+    op.execute(f"DROP TYPE {ENUM_NAME}")
+    op.execute(f"ALTER TYPE {old_enum_name} RENAME TO {ENUM_NAME}")
diff --git a/surfsense_backend/app/agents/researcher/nodes.py b/surfsense_backend/app/agents/researcher/nodes.py
index 67550d6..9831115 100644
--- a/surfsense_backend/app/agents/researcher/nodes.py
+++ b/surfsense_backend/app/agents/researcher/nodes.py
@@ -84,9 +84,9 @@ async def fetch_documents_by_ids(
                 "document": {
                     "id": doc.id,
                     "title": doc.title,
-                    "document_type": doc.document_type.value
-                    if doc.document_type
-                    else "UNKNOWN",
+                    "document_type": (
+                        doc.document_type.value if doc.document_type else "UNKNOWN"
+                    ),
                     "metadata": doc.document_metadata or {},
                 },
                 "source": doc.document_type.value if doc.document_type else "UNKNOWN",
@@ -186,9 +186,11 @@ async def fetch_documents_by_ids(
                     title = f"GitHub: {doc.title}"
                     description = metadata.get(
                         "description",
-                        doc.content[:100] + "..."
-                        if len(doc.content) > 100
-                        else doc.content,
+                        (
+                            doc.content[:100] + "..."
+                            if len(doc.content) > 100
+                            else doc.content
+                        ),
                     )
                     url = metadata.get("url", "")
 
@@ -204,9 +206,11 @@ async def fetch_documents_by_ids(
 
                     description = metadata.get(
                         "description",
-                        doc.content[:100] + "..."
-                        if len(doc.content) > 100
-                        else doc.content,
+                        (
+                            doc.content[:100] + "..."
+                            if len(doc.content) > 100
+                            else doc.content
+                        ),
                     )
                     url = (
                         f"https://www.youtube.com/watch?v={video_id}"
@@ -238,6 +242,35 @@ async def fetch_documents_by_ids(
                     else:
                         url = ""
 
+                elif doc_type == "JIRA_CONNECTOR":
+                    # Extract Jira-specific metadata
+                    issue_key = metadata.get("issue_key", "Unknown Issue")
+                    issue_title = metadata.get("issue_title", "Untitled Issue")
+                    status = metadata.get("status", "")
+                    priority = metadata.get("priority", "")
+                    issue_type = metadata.get("issue_type", "")
+
+                    title = f"Jira: {issue_key} - {issue_title}"
+                    if status:
+                        title += f" ({status})"
+
+                    description = (
+                        doc.content[:100] + "..."
+                        if len(doc.content) > 100
+                        else doc.content
+                    )
+                    if priority:
+                        description += f" | Priority: {priority}"
+                    if issue_type:
+                        description += f" | Type: {issue_type}"
+
+                    # Construct Jira URL if we have the base URL
+                    base_url = metadata.get("base_url", "")
+                    if base_url and issue_key:
+                        url = f"{base_url}/browse/{issue_key}"
+                    else:
+                        url = ""
+
                 elif doc_type == "EXTENSION":
                     # Extract Extension-specific metadata
                     webpage_title = metadata.get("VisitedWebPageTitle", doc.title)
@@ -268,9 +301,11 @@ async def fetch_documents_by_ids(
                         "og:description",
                         metadata.get(
                             "ogDescription",
-                            doc.content[:100] + "..."
-                            if len(doc.content) > 100
-                            else doc.content,
+                            (
+                                doc.content[:100] + "..."
+                                if len(doc.content) > 100
+                                else doc.content
+                            ),
                         ),
                     )
                     url = metadata.get("url", "")
@@ -301,6 +336,7 @@ async def fetch_documents_by_ids(
                 "GITHUB_CONNECTOR": "GitHub (Selected)",
                 "YOUTUBE_VIDEO": "YouTube Videos (Selected)",
                 "DISCORD_CONNECTOR": "Discord (Selected)",
+                "JIRA_CONNECTOR": "Jira Issues (Selected)",
                 "EXTENSION": "Browser Extension (Selected)",
                 "CRAWLED_URL": "Web Pages (Selected)",
                 "FILE": "Files (Selected)",
@@ -376,10 +412,10 @@ async def write_answer_outline(
     # Create the human message content
     human_message_content = f"""
     Now Please create an answer outline for the following query:
-
+
     User Query: {reformulated_query}
     Number of Sections: {num_sections}
-
+
     Remember to format your response as valid JSON exactly matching this structure:
     {{
        "answer_outline": [
            {{
                "section_id": 0,
                "section_title": "Section Title",
                "questions": [
                    "Question 1",
                    "Question 2"
                ]
            }}
        ]
     }}
-
+
     Your output MUST be valid JSON in exactly this format. Do not include any other text or explanation.
     """
@@ -802,7 +838,9 @@ async def fetch_relevant_documents(
                         source_object,
                         linkup_chunks,
                     ) = await connector_service.search_linkup(
-                        user_query=reformulated_query, user_id=user_id, mode=linkup_mode
+                        user_query=reformulated_query,
+                        user_id=user_id,
+                        mode=linkup_mode,
                     )
 
                     # Add to sources and raw documents
@@ -845,6 +883,30 @@ async def fetch_relevant_documents(
                         }
                     )
 
+            elif connector == "JIRA_CONNECTOR":
+                source_object, jira_chunks = await connector_service.search_jira(
+                    user_query=reformulated_query,
+                    user_id=user_id,
+                    search_space_id=search_space_id,
+                    top_k=top_k,
+                    search_mode=search_mode,
+                )
+
+                # Add to sources and raw documents
+                if source_object:
+                    all_sources.append(source_object)
+                all_raw_documents.extend(jira_chunks)
+
+                # Stream found document count
+                if streaming_service and writer:
+                    writer(
+                        {
+                            "yield_value": streaming_service.format_terminal_info_delta(
+                                f"🎫 Found {len(jira_chunks)} Jira issues related to your query"
+                            )
+                        }
+                    )
+
         except Exception as e:
             error_message = f"Error searching connector {connector}: {e!s}"
             print(error_message)
@@ -1214,7 +1276,7 @@ async def process_sections(
 
     # Combine the results into a final report with section titles
     final_report = []
-    for _, (section, content) in enumerate(
+    for _i, (section, content) in enumerate(
         zip(answer_outline.answer_outline, processed_results, strict=False)
     ):
         # Skip adding the section header since the content already contains the title
@@ -1725,11 +1787,11 @@ async def generate_further_questions(
     # Create the human message content
     human_message_content = f"""
     {chat_history_xml}
-
+
     {documents_xml}
-
+
     Based on the chat history and available documents above, generate 3-5 contextually relevant follow-up questions that would naturally extend the conversation and provide additional value to the user. Make sure the questions can be reasonably answered using the available documents or knowledge base.
-
+
     Your response MUST be valid JSON in exactly this format:
     {{
        "further_questions": [
            {{
                "id": 0,
                "question": "Question text here"
            }}
        ]
     }}
-
+
     Do not include any other text or explanation. Only return the JSON.
     """
diff --git a/surfsense_backend/app/agents/researcher/qna_agent/prompts.py b/surfsense_backend/app/agents/researcher/qna_agent/prompts.py
index eed0722..3f4d975 100644
--- a/surfsense_backend/app/agents/researcher/qna_agent/prompts.py
+++ b/surfsense_backend/app/agents/researcher/qna_agent/prompts.py
@@ -15,7 +15,8 @@ You are SurfSense, an advanced AI research assistant that provides detailed, wel
 - YOUTUBE_VIDEO: "YouTube video transcripts and metadata" (personally saved videos)
 - GITHUB_CONNECTOR: "GitHub repository content and issues" (personal repositories and interactions)
 - LINEAR_CONNECTOR: "Linear project issues and discussions" (personal project management)
-- DISCORD_CONNECTOR: "Discord server messages and channels" (personal community interactions)
+- JIRA_CONNECTOR: "Jira project issues, tickets, and comments" (personal project tracking)
+- DISCORD_CONNECTOR: "Discord server conversations and shared content" (personal community communications)
 - TAVILY_API: "Tavily search API results" (personalized search results)
 - LINKUP_API: "Linkup search API results" (personalized search results)
 
@@ -71,7 +72,7 @@ You are SurfSense, an advanced AI research assistant that provides detailed, wel
             Python's asyncio library provides tools for writing concurrent code using the async/await syntax. It's particularly useful for I/O-bound and high-level structured network code.
-
+
             12
diff --git a/surfsense_backend/app/agents/researcher/utils.py b/surfsense_backend/app/agents/researcher/utils.py
index 908b3ab..e26788c 100644
--- a/surfsense_backend/app/agents/researcher/utils.py
+++ b/surfsense_backend/app/agents/researcher/utils.py
@@ -43,6 +43,8 @@ def get_connector_emoji(connector_name: str) -> str:
         "NOTION_CONNECTOR": "📘",
         "GITHUB_CONNECTOR": "🐙",
         "LINEAR_CONNECTOR": "📊",
+        "JIRA_CONNECTOR": "🎫",
+        "DISCORD_CONNECTOR": "🗨️",
         "TAVILY_API": "🔍",
         "LINKUP_API": "🔗",
     }
@@ -60,6 +62,8 @@ def get_connector_friendly_name(connector_name: str) -> str:
         "NOTION_CONNECTOR": "Notion",
         "GITHUB_CONNECTOR": "GitHub",
         "LINEAR_CONNECTOR": "Linear",
+        "JIRA_CONNECTOR": "Jira",
+        "DISCORD_CONNECTOR": "Discord",
         "TAVILY_API": "Tavily Search",
         "LINKUP_API": "Linkup Search",
     }
diff --git a/surfsense_backend/app/connectors/jira_connector.py b/surfsense_backend/app/connectors/jira_connector.py
new file mode 100644
index 0000000..ef0e003
--- /dev/null
+++ b/surfsense_backend/app/connectors/jira_connector.py
@@ -0,0 +1,487 @@
+"""
+Jira Connector Module
+
+A module for retrieving data from Jira.
+Allows fetching issue lists and their comments, projects and more.
+"""
+
+import base64
+from datetime import datetime
+from typing import Any
+
+import requests
+
+
+class JiraConnector:
+    """Class for retrieving data from Jira."""
+
+    def __init__(
+        self,
+        base_url: str | None = None,
+        email: str | None = None,
+        api_token: str | None = None,
+    ):
+        """
+        Initialize the JiraConnector class.
+
+        Args:
+            base_url: Jira instance base URL (e.g., 'https://yourcompany.atlassian.net') (optional)
+            email: Jira account email address (optional)
+            api_token: Jira API token (optional)
+        """
+        self.base_url = base_url.rstrip("/") if base_url else None
+        self.email = email
+        self.api_token = api_token
+        self.api_version = "3"  # Jira Cloud API version
+
+    def set_credentials(self, base_url: str, email: str, api_token: str) -> None:
+        """
+        Set the Jira credentials.
+
+        Args:
+            base_url: Jira instance base URL
+            email: Jira account email address
+            api_token: Jira API token
+        """
+        self.base_url = base_url.rstrip("/")
+        self.email = email
+        self.api_token = api_token
+
+    def set_email(self, email: str) -> None:
+        """
+        Set the Jira account email.
+
+        Args:
+            email: Jira account email address
+        """
+        self.email = email
+
+    def set_api_token(self, api_token: str) -> None:
+        """
+        Set the Jira API token.
+
+        Args:
+            api_token: Jira API token
+        """
+        self.api_token = api_token
+
+    def get_headers(self) -> dict[str, str]:
+        """
+        Get headers for Jira API requests using Basic Authentication.
+
+        Returns:
+            Dictionary of headers
+
+        Raises:
+            ValueError: If email, api_token, or base_url have not been set
+        """
+        if not all([self.base_url, self.email, self.api_token]):
+            raise ValueError(
+                "Jira credentials not initialized. Call set_credentials() first."
+            )
+
+        # Create Basic Auth header using email:api_token
+        auth_str = f"{self.email}:{self.api_token}"
+        auth_bytes = auth_str.encode("utf-8")
+        auth_header = "Basic " + base64.b64encode(auth_bytes).decode("ascii")
+
+        return {
+            "Content-Type": "application/json",
+            "Authorization": auth_header,
+            "Accept": "application/json",
+        }
+
+    def make_api_request(
+        self, endpoint: str, params: dict[str, Any] | None = None
+    ) -> dict[str, Any]:
+        """
+        Make a request to the Jira API.
+
+        Args:
+            endpoint: API endpoint (without base URL)
+            params: Query parameters for the request (optional)
+
+        Returns:
+            Response data from the API
+
+        Raises:
+            ValueError: If email, api_token, or base_url have not been set
+            Exception: If the API request fails
+        """
+        if not all([self.base_url, self.email, self.api_token]):
+            raise ValueError(
+                "Jira credentials not initialized. Call set_credentials() first."
+            )
+
+        url = f"{self.base_url}/rest/api/{self.api_version}/{endpoint}"
+        headers = self.get_headers()
+
+        response = requests.get(url, headers=headers, params=params, timeout=500)
+
+        if response.status_code == 200:
+            return response.json()
+        else:
+            raise Exception(
+                f"API request failed with status code {response.status_code}: {response.text}"
+            )
+
+    def get_all_projects(self) -> dict[str, Any]:
+        """
+        Fetch all projects from Jira.
+
+        Returns:
+            Paginated response containing the project objects
+
+        Raises:
+            ValueError: If credentials have not been set
+            Exception: If the API request fails
+        """
+        return self.make_api_request("project/search")
+
+    def get_all_issues(self, project_key: str | None = None) -> list[dict[str, Any]]:
+        """
+        Fetch all issues from Jira.
+
+        Args:
+            project_key: Optional project key to filter issues (e.g., 'PROJ')
+
+        Returns:
+            List of issue objects
+
+        Raises:
+            ValueError: If credentials have not been set
+            Exception: If the API request fails
+        """
+        jql = "ORDER BY created DESC"
+        if project_key:
+            jql = f'project = "{project_key}" ' + jql
+
+        fields = [
+            "summary",
+            "description",
+            "status",
+            "assignee",
+            "reporter",
+            "created",
+            "updated",
+            "priority",
+            "issuetype",
+            "project",
+        ]
+
+        params = {
+            "jql": jql,
+            "fields": ",".join(fields),
+            "maxResults": 100,
+            "startAt": 0,
+        }
+
+        all_issues = []
+        start_at = 0
+
+        while True:
+            params["startAt"] = start_at
+            result = self.make_api_request("search", params)
+
+            if not isinstance(result, dict) or "issues" not in result:
+                raise Exception("Invalid response from Jira API")
+
+            issues = result["issues"]
+            all_issues.extend(issues)
+
+            print(f"Fetched {len(issues)} issues (startAt={start_at})")
+
+            total = result.get("total", 0)
+            if start_at + len(issues) >= total:
+                break
+
+            start_at += len(issues)
+
+        return all_issues
+
+    def get_issues_by_date_range(
+        self,
+        start_date: str,
+        end_date: str,
+        include_comments: bool = True,
+        project_key: str | None = None,
+    ) -> tuple[list[dict[str, Any]], str | None]:
+        """
+        Fetch issues within a date range.
+
+        Args:
+            start_date: Start date in YYYY-MM-DD format
+            end_date: End date in YYYY-MM-DD format (inclusive)
+            include_comments: Whether to include comments in the response
+            project_key: Optional project key to filter issues
+
+        Returns:
+            Tuple containing (issues list, error message or None)
+        """
+        try:
+            # Build JQL query for the date range:
+            # match issues that were either created OR updated within it
+            date_filter = (
+                f"((created >= '{start_date}' AND created <= '{end_date} 23:59') "
+                f"OR (updated >= '{start_date}' AND updated <= '{end_date} 23:59'))"
+            )
+
+            jql = f"{date_filter} ORDER BY created DESC"
+            if project_key:
+                jql = f'project = "{project_key}" AND {jql}'
+
+            # Define fields to retrieve
+            fields = [
+                "summary",
+                "description",
+                "status",
+                "assignee",
+                "reporter",
+                "created",
+                "updated",
+                "priority",
+                "issuetype",
+                "project",
+            ]
+
+            if include_comments:
+                fields.append("comment")
+
+            params = {
+                "jql": jql,
+                "fields": ",".join(fields),
+                "maxResults": 100,
+                "startAt": 0,
+            }
+
+            all_issues = []
+            start_at = 0
+
+            while True:
+                params["startAt"] = start_at
+
+                result = self.make_api_request("search", params)
+
+                if not isinstance(result, dict) or "issues" not in result:
+                    return [], "Invalid response from Jira API"
+
+                issues = result["issues"]
+                all_issues.extend(issues)
+
+                # Check if there are more issues to fetch
+                total = result.get("total", 0)
+                if start_at + len(issues) >= total:
+                    break
+
+                start_at += len(issues)
+
+            if not all_issues:
+                return [], "No issues found in the specified date range."
+
+            return all_issues, None
+
+        except Exception as e:
+            return [], f"Error fetching issues: {e!s}"
+
+    def format_issue(self, issue: dict[str, Any]) -> dict[str, Any]:
+        """
+        Format an issue for easier consumption.
+
+        Args:
+            issue: The issue object from Jira API
+
+        Returns:
+            Formatted issue dictionary
+        """
+        fields = issue.get("fields", {})
+
+        # Extract basic issue details
+        formatted = {
+            "id": issue.get("id", ""),
+            "key": issue.get("key", ""),
+            "title": fields.get("summary", ""),
+            "description": fields.get("description", ""),
+            "status": (
+                fields.get("status", {}).get("name", "Unknown")
+                if fields.get("status")
+                else "Unknown"
+            ),
+            "status_category": (
+                fields.get("status", {})
+                .get("statusCategory", {})
+                .get("name", "Unknown")
+                if fields.get("status")
+                else "Unknown"
+            ),
+            "priority": (
+                fields.get("priority", {}).get("name", "Unknown")
+                if fields.get("priority")
+                else "Unknown"
+            ),
+            "issue_type": (
+                fields.get("issuetype", {}).get("name", "Unknown")
+                if fields.get("issuetype")
+                else "Unknown"
+            ),
+            "project": (
+                fields.get("project", {}).get("key", "Unknown")
+                if fields.get("project")
+                else "Unknown"
+            ),
+            "created_at": fields.get("created", ""),
+            "updated_at": fields.get("updated", ""),
+            "reporter": (
+                {
+                    "account_id": (
+                        fields.get("reporter", {}).get("accountId", "")
+                        if fields.get("reporter")
+                        else ""
+                    ),
+                    "display_name": (
+                        fields.get("reporter", {}).get("displayName", "Unknown")
+                        if fields.get("reporter")
+                        else "Unknown"
+                    ),
+                    "email": (
+                        fields.get("reporter", {}).get("emailAddress", "")
+                        if fields.get("reporter")
+                        else ""
+                    ),
+                }
+                if fields.get("reporter")
+                else {"account_id": "", "display_name": "Unknown", "email": ""}
+            ),
+            "assignee": (
+                {
+                    "account_id": fields.get("assignee", {}).get("accountId", ""),
+                    "display_name": fields.get("assignee", {}).get(
+                        "displayName", "Unknown"
+                    ),
+                    "email": fields.get("assignee", {}).get("emailAddress", ""),
+                }
+                if fields.get("assignee")
+                else None
+            ),
+            "comments": [],
+        }
+
+        # Extract comments if available
+        if "comment" in fields and "comments" in fields["comment"]:
+            for comment in fields["comment"]["comments"]:
+                formatted_comment = {
+                    "id": comment.get("id", ""),
+                    "body": comment.get("body", ""),
+                    "created_at": comment.get("created", ""),
+                    "updated_at": comment.get("updated", ""),
+                    "author": (
+                        {
+                            "account_id": (
+                                comment.get("author", {}).get("accountId", "")
+                                if comment.get("author")
+                                else ""
+                            ),
+                            "display_name": (
+                                comment.get("author", {}).get("displayName", "Unknown")
+                                if comment.get("author")
+                                else "Unknown"
+                            ),
+                            "email": (
+                                comment.get("author", {}).get("emailAddress", "")
+                                if comment.get("author")
+                                else ""
+                            ),
+                        }
+                        if comment.get("author")
+                        else {"account_id": "", "display_name": "Unknown", "email": ""}
+                    ),
+                }
+                formatted["comments"].append(formatted_comment)
+
+        return formatted
+
+    def format_issue_to_markdown(self, issue: dict[str, Any]) -> str:
+        """
+        Convert an issue to markdown format.
+
+        Args:
+            issue: The issue object (either raw or formatted)
+
+        Returns:
+            Markdown string representation of the issue
+        """
+        # Format the issue if it's still a raw API payload
+        if "fields" in issue:
+            issue = self.format_issue(issue)
+
+        # Build the markdown content
+        markdown = (
+            f"# {issue.get('key', 'No Key')}: {issue.get('title', 'No Title')}\n\n"
+        )
+
+        if issue.get("status"):
+            markdown += f"**Status:** {issue['status']}\n"
+
+        if issue.get("priority"):
+            markdown += f"**Priority:** {issue['priority']}\n"
+
+        if issue.get("issue_type"):
+            markdown += f"**Type:** {issue['issue_type']}\n"
+
+        if issue.get("project"):
+            markdown += f"**Project:** {issue['project']}\n\n"
+
+        if issue.get("assignee") and issue["assignee"].get("display_name"):
+            markdown += f"**Assignee:** {issue['assignee']['display_name']}\n"
+
+        if issue.get("reporter") and issue["reporter"].get("display_name"):
+            markdown += f"**Reporter:** {issue['reporter']['display_name']}\n"
+
+        if issue.get("created_at"):
+            created_date = self.format_date(issue["created_at"])
+            markdown += f"**Created:** {created_date}\n"
+
+        if issue.get("updated_at"):
+            updated_date = self.format_date(issue["updated_at"])
+            markdown += f"**Updated:** {updated_date}\n\n"
+
+        if issue.get("description"):
+            markdown += f"## Description\n\n{issue['description']}\n\n"
+
+        if issue.get("comments"):
+            markdown += f"## Comments ({len(issue['comments'])})\n\n"
+
+            for comment in issue["comments"]:
+                author_name = "Unknown"
+                if comment.get("author") and comment["author"].get("display_name"):
+                    author_name = comment["author"]["display_name"]
+
+                comment_date = "Unknown date"
+                if comment.get("created_at"):
+                    comment_date = self.format_date(comment["created_at"])
+
+                markdown += f"### {author_name} ({comment_date})\n\n{comment.get('body', '')}\n\n---\n\n"
+
+        return markdown
+
+    @staticmethod
+    def format_date(iso_date: str) -> str:
+        """
+        Format an ISO date string to a more readable format.
+
+        Args:
+            iso_date: ISO format date string
+
+        Returns:
+            Formatted date string
+        """
+        if not iso_date or not isinstance(iso_date, str):
+            return "Unknown date"
+
+        try:
+            # Jira dates are typically in format: 2023-01-01T12:00:00.000+0000
+            dt = datetime.fromisoformat(iso_date.replace("Z", "+00:00"))
+            return dt.strftime("%Y-%m-%d %H:%M:%S")
+        except ValueError:
+            return iso_date
diff --git a/surfsense_backend/app/db.py b/surfsense_backend/app/db.py
index 3d235d0..1a7aa57 100644
--- a/surfsense_backend/app/db.py
+++ b/surfsense_backend/app/db.py
@@ -3,6 +3,7 @@ from datetime import UTC, datetime
 from enum import Enum
 
 from fastapi import Depends
+from fastapi_users.db import SQLAlchemyBaseUserTableUUID, SQLAlchemyUserDatabase
 from pgvector.sqlalchemy import Vector
 from sqlalchemy import (
     ARRAY,
@@ -26,13 +27,7 @@ from app.retriver.chunks_hybrid_search import ChucksHybridSearchRetriever
 from app.retriver.documents_hybrid_search import DocumentHybridSearchRetriever
 
 if config.AUTH_TYPE == "GOOGLE":
-    from fastapi_users.db import (
-        SQLAlchemyBaseOAuthAccountTableUUID,
-        SQLAlchemyBaseUserTableUUID,
-        SQLAlchemyUserDatabase,
-    )
-else:
-    from fastapi_users.db import SQLAlchemyBaseUserTableUUID, SQLAlchemyUserDatabase
+    from fastapi_users.db import SQLAlchemyBaseOAuthAccountTableUUID
 
 DATABASE_URL = config.DATABASE_URL
 
@@ -47,6 +42,7 @@ class DocumentType(str, Enum):
     GITHUB_CONNECTOR = "GITHUB_CONNECTOR"
     LINEAR_CONNECTOR = "LINEAR_CONNECTOR"
     DISCORD_CONNECTOR = "DISCORD_CONNECTOR"
+    JIRA_CONNECTOR = "JIRA_CONNECTOR"
 
 
 class SearchSourceConnectorType(str, Enum):
@@ -58,6 +54,7 @@ class SearchSourceConnectorType(str, Enum):
     GITHUB_CONNECTOR = "GITHUB_CONNECTOR"
     LINEAR_CONNECTOR = "LINEAR_CONNECTOR"
     DISCORD_CONNECTOR = "DISCORD_CONNECTOR"
+    JIRA_CONNECTOR = "JIRA_CONNECTOR"
 
 
 class ChatType(str, Enum):
@@ -320,6 +317,7 @@ if config.AUTH_TYPE == "GOOGLE":
         strategic_llm = relationship(
             "LLMConfig", foreign_keys=[strategic_llm_id], post_update=True
         )
+
 else:
 
     class User(SQLAlchemyBaseUserTableUUID, Base):
@@ -402,6 +400,7 @@ if config.AUTH_TYPE == "GOOGLE":
 
     async def get_user_db(session: AsyncSession = Depends(get_async_session)):
         yield SQLAlchemyUserDatabase(session, User, OAuthAccount)
+
 else:
 
     async def get_user_db(session: AsyncSession = Depends(get_async_session)):
diff --git a/surfsense_backend/app/routes/search_source_connectors_routes.py b/surfsense_backend/app/routes/search_source_connectors_routes.py
index 47caa97..4c3d691 100644
--- a/surfsense_backend/app/routes/search_source_connectors_routes.py
+++ b/surfsense_backend/app/routes/search_source_connectors_routes.py
@@ -38,6 +38,7 @@ from app.schemas import (
 from app.tasks.connectors_indexing_tasks import (
     index_discord_messages,
     index_github_repos,
+    index_jira_issues,
     index_linear_issues,
     index_notion_pages,
     index_slack_messages,
@@ -336,6 +337,7 @@ async def index_connector_content(
     - NOTION_CONNECTOR: Indexes pages from all accessible Notion pages
     - GITHUB_CONNECTOR: Indexes code and documentation from GitHub repositories
     - LINEAR_CONNECTOR: Indexes issues and comments from Linear
+    - JIRA_CONNECTOR: Indexes issues and comments from Jira
     - DISCORD_CONNECTOR: Indexes messages from all accessible Discord channels
 
     Args:
@@ -353,7 +355,9 @@ async def index_connector_content(
     )
 
     # Check if the search space belongs to the user
-    await check_ownership(session, SearchSpace, search_space_id, user)
+    _search_space = await check_ownership(
+        session, SearchSpace, search_space_id, user
+    )
 
     # Handle different connector types
     response_message = ""
@@ -438,6 +442,21 @@ async def index_connector_content(
             )
             response_message = "Linear indexing started in the background."
 
+        elif connector.connector_type == SearchSourceConnectorType.JIRA_CONNECTOR:
+            # Run indexing in background
+            logger.info(
+                f"Triggering Jira indexing for connector {connector_id} into search space {search_space_id} from {indexing_from} to {indexing_to}"
+            )
+            background_tasks.add_task(
+                run_jira_indexing_with_new_session,
+                connector_id,
+                search_space_id,
+                str(user.id),
+                indexing_from,
+                indexing_to,
+            )
+            response_message = "Jira indexing started in the background."
+
         elif connector.connector_type == SearchSourceConnectorType.DISCORD_CONNECTOR:
             # Run indexing in background
             logger.info(
@@ -807,3 +826,61 @@ async def run_discord_indexing(
         )
     except Exception as e:
         logger.error(f"Error in background Discord indexing task: {e!s}")
+
+
+# Add new helper functions for Jira indexing
+async def run_jira_indexing_with_new_session(
+    connector_id: int,
+    search_space_id: int,
+    user_id: str,
+    start_date: str,
+    end_date: str,
+):
+    """Wrapper to run Jira indexing with its own database session."""
+    logger.info(
+        f"Background task started: Indexing Jira connector {connector_id} into space {search_space_id} from {start_date} to {end_date}"
+    )
+    async with async_session_maker() as session:
+        await run_jira_indexing(
+            session, connector_id, search_space_id, user_id, start_date, end_date
+        )
+    logger.info(f"Background task finished: Indexing Jira connector {connector_id}")
+
+
+async def run_jira_indexing(
+    session: AsyncSession,
+    connector_id: int,
+    search_space_id: int,
+    user_id: str,
+    start_date: str,
+    end_date: str,
+):
+    """Runs the Jira indexing task and updates the timestamp."""
+    try:
+        indexed_count, error_message = await index_jira_issues(
+            session,
+            connector_id,
+            search_space_id,
+            user_id,
+            start_date,
+            end_date,
+            update_last_indexed=False,
+        )
+        if error_message:
+            logger.error(
+                f"Jira indexing failed for connector {connector_id}: {error_message}"
+            )
+            # Optionally update status in DB to indicate failure
+        else:
+            logger.info(
+                f"Jira indexing successful for connector {connector_id}. Indexed {indexed_count} documents."
+            )
+            # Update the last indexed timestamp only on success
+            await update_connector_last_indexed(session, connector_id)
+            await session.commit()  # Commit timestamp update
+    except Exception as e:
+        logger.error(
+            f"Critical error in run_jira_indexing for connector {connector_id}: {e}",
+            exc_info=True,
+        )
+        # Optionally update status in DB to indicate failure
diff --git a/surfsense_backend/app/schemas/search_source_connector.py b/surfsense_backend/app/schemas/search_source_connector.py
index 719a9f9..9c43d07 100644
--- a/surfsense_backend/app/schemas/search_source_connector.py
+++ b/surfsense_backend/app/schemas/search_source_connector.py
@@ -123,6 +123,25 @@ class SearchSourceConnectorBase(BaseModel):
             # Ensure the bot token is not empty
             if not config.get("DISCORD_BOT_TOKEN"):
                 raise ValueError("DISCORD_BOT_TOKEN cannot be empty")
+        elif connector_type == SearchSourceConnectorType.JIRA_CONNECTOR:
+            # For JIRA_CONNECTOR, require JIRA_EMAIL, JIRA_API_TOKEN and JIRA_BASE_URL
+            allowed_keys = ["JIRA_EMAIL", "JIRA_API_TOKEN", "JIRA_BASE_URL"]
+            if set(config.keys()) != set(allowed_keys):
+                raise ValueError(
+                    f"For JIRA_CONNECTOR connector type, config must only contain these keys: {allowed_keys}"
+                )
+
+            # Ensure the email is not empty
+            if not config.get("JIRA_EMAIL"):
+                raise ValueError("JIRA_EMAIL cannot be empty")
+
+            # Ensure the API token is not empty
+            if not config.get("JIRA_API_TOKEN"):
+                raise ValueError("JIRA_API_TOKEN cannot be empty")
+
+            # Ensure the base URL is not empty
+            if not config.get("JIRA_BASE_URL"):
+                raise ValueError("JIRA_BASE_URL cannot be empty")
 
         return config
diff --git a/surfsense_backend/app/services/connector_service.py b/surfsense_backend/app/services/connector_service.py
index 33001e2..1c6d612 100644
--- a/surfsense_backend/app/services/connector_service.py
+++ b/surfsense_backend/app/services/connector_service.py
@@ -1,4 +1,5 @@
 import asyncio
+from typing import Any
 
 from linkup import LinkupClient
 from sqlalchemy import func
@@ -204,7 +205,9 @@ class ConnectorService:
 
         return result_object, files_chunks
 
-    def _transform_document_results(self, document_results: list[dict]) -> list[dict]:
+    def _transform_document_results(
+        self, document_results: list[dict[str, Any]]
+    ) -> list[dict[str, Any]]:
         """
         Transform results from document_retriever.hybrid_search()
         to match the format expected by the processing code.
@@ -608,6 +611,7 @@ class ConnectorService:
             visit_duration = metadata.get(
                 "VisitedWebPageVisitDurationInMilliseconds", ""
             )
+            _browsing_session_id = metadata.get("BrowsingSessionId", "")
 
             # Create a more descriptive title for extension data
             title = webpage_title
@@ -948,6 +952,127 @@ class ConnectorService:
 
         return result_object, linear_chunks
 
+    async def search_jira(
+        self,
+        user_query: str,
+        user_id: str,
+        search_space_id: int,
+        top_k: int = 20,
+        search_mode: SearchMode = SearchMode.CHUNKS,
+    ) -> tuple:
+        """
+        Search for Jira issues and comments and return both the source information and langchain documents
+
+        Args:
+            user_query: The user's query
+            user_id: The user's ID
+            search_space_id: The search space ID to search in
+            top_k: Maximum number of results to return
+            search_mode: Search mode (CHUNKS or DOCUMENTS)
+
+        Returns:
+            tuple: (sources_info, langchain_documents)
+        """
+        if search_mode == SearchMode.CHUNKS:
+            jira_chunks = await self.chunk_retriever.hybrid_search(
+                query_text=user_query,
+                top_k=top_k,
+                user_id=user_id,
+                search_space_id=search_space_id,
+                document_type="JIRA_CONNECTOR",
+            )
+        elif search_mode == SearchMode.DOCUMENTS:
+            jira_chunks = await self.document_retriever.hybrid_search(
+                query_text=user_query,
+                top_k=top_k,
+                user_id=user_id,
+                search_space_id=search_space_id,
+                document_type="JIRA_CONNECTOR",
+            )
+            # Transform document retriever results to match expected format
+            jira_chunks = self._transform_document_results(jira_chunks)
+
+        # Early return if no results
+        if not jira_chunks:
+            return {
+                "id": 30,
+                "name": "Jira Issues",
+                "type": "JIRA_CONNECTOR",
+                "sources": [],
+            }, []
+
+        # Process each chunk and create sources directly without deduplication
+        sources_list = []
+        async with self.counter_lock:
+            for _i, chunk in enumerate(jira_chunks):
+                # Extract document metadata
+                document = chunk.get("document", {})
+                metadata = document.get("metadata", {})
+
+                # Extract Jira-specific metadata
+                issue_key = metadata.get("issue_key", "")
+                issue_title = metadata.get("issue_title", "Untitled Issue")
+                status = metadata.get("status", "")
+                priority = metadata.get("priority", "")
+                issue_type = metadata.get("issue_type", "")
+                comment_count = metadata.get("comment_count", 0)
+
+                # Create a more descriptive title for Jira issues
+                title = f"Jira: {issue_key} - {issue_title}"
+                if status:
+                    title += f" ({status})"
+
+                # Create a more descriptive description for Jira issues
+                description = chunk.get("content", "")[:100]
+                if len(description) == 100:
+                    description += "..."
+ + # Add priority and type info to description + info_parts = [] + if priority: + info_parts.append(f"Priority: {priority}") + if issue_type: + info_parts.append(f"Type: {issue_type}") + if comment_count: + info_parts.append(f"Comments: {comment_count}") + + if info_parts: + if description: + description += f" | {' | '.join(info_parts)}" + else: + description = " | ".join(info_parts) + + # For URL, we could construct a URL to the Jira issue if we have the base URL + # For now, use a generic placeholder + url = "" + if issue_key and metadata.get("base_url"): + url = f"{metadata.get('base_url')}/browse/{issue_key}" + + source = { + "id": document.get("id", self.source_id_counter), + "title": title, + "description": description, + "url": url, + "issue_key": issue_key, + "status": status, + "priority": priority, + "issue_type": issue_type, + "comment_count": comment_count, + } + + self.source_id_counter += 1 + sources_list.append(source) + + # Create result object + result_object = { + "id": 10, # Assign a unique ID for the Jira connector + "name": "Jira Issues", + "type": "JIRA_CONNECTOR", + "sources": sources_list, + } + + return result_object, jira_chunks + async def search_linkup( self, user_query: str, user_id: str, mode: str = "standard" ) -> tuple: @@ -1013,12 +1138,12 @@ class ConnectorService: # Create a source entry source = { "id": self.source_id_counter, - "title": result.name - if hasattr(result, "name") - else "Linkup Result", - "description": result.content[:100] - if hasattr(result, "content") - else "", + "title": ( + result.name if hasattr(result, "name") else "Linkup Result" + ), + "description": ( + result.content[:100] if hasattr(result, "content") else "" + ), "url": result.url if hasattr(result, "url") else "", } sources_list.append(source) @@ -1030,9 +1155,11 @@ class ConnectorService: "score": 1.0, # Default score since not provided by Linkup "document": { "id": self.source_id_counter, - "title": result.name - if hasattr(result, "name") - else "Linkup Result", + "title": ( + result.name + if hasattr(result, "name") + else "Linkup Result" + ), "document_type": "LINKUP_API", "metadata": { "url": result.url if hasattr(result, "url") else "", diff --git a/surfsense_backend/app/tasks/connectors_indexing_tasks.py b/surfsense_backend/app/tasks/connectors_indexing_tasks.py index 053b8ba..e028a47 100644 --- a/surfsense_backend/app/tasks/connectors_indexing_tasks.py +++ b/surfsense_backend/app/tasks/connectors_indexing_tasks.py @@ -10,6 +10,7 @@ from sqlalchemy.future import select from app.config import config from app.connectors.discord_connector import DiscordConnector from app.connectors.github_connector import GitHubConnector +from app.connectors.jira_connector import JiraConnector from app.connectors.linear_connector import LinearConnector from app.connectors.notion_history import NotionHistoryConnector from app.connectors.slack_history import SlackHistory @@ -1374,9 +1375,9 @@ async def index_linear_issues( # Process each issue for issue in issues: try: - issue_id = issue.get("id") - issue_identifier = issue.get("identifier", "") - issue_title = issue.get("title", "") + issue_id = issue.get("key") + issue_identifier = issue.get("id", "") + issue_title = issue.get("key", "") if not issue_id or not issue_title: logger.warning( @@ -1978,3 +1979,353 @@ async def index_discord_messages( ) logger.error(f"Failed to index Discord messages: {e!s}", exc_info=True) return 0, f"Failed to index Discord messages: {e!s}" + + +async def index_jira_issues( + session: AsyncSession, + 
connector_id: int, + search_space_id: int, + user_id: str, + start_date: str | None = None, + end_date: str | None = None, + update_last_indexed: bool = True, +) -> tuple[int, str | None]: + """ + Index Jira issues and comments. + + Args: + session: Database session + connector_id: ID of the Jira connector + search_space_id: ID of the search space to store documents in + user_id: User ID + start_date: Start date for indexing (YYYY-MM-DD format) + end_date: End date for indexing (YYYY-MM-DD format) + update_last_indexed: Whether to update the last_indexed_at timestamp (default: True) + + Returns: + Tuple containing (number of documents indexed, error message or None) + """ + task_logger = TaskLoggingService(session, search_space_id) + + # Log task start + log_entry = await task_logger.log_task_start( + task_name="jira_issues_indexing", + source="connector_indexing_task", + message=f"Starting Jira issues indexing for connector {connector_id}", + metadata={ + "connector_id": connector_id, + "user_id": str(user_id), + "start_date": start_date, + "end_date": end_date, + }, + ) + + try: + # Get the connector from the database + result = await session.execute( + select(SearchSourceConnector).filter( + SearchSourceConnector.id == connector_id, + SearchSourceConnector.connector_type + == SearchSourceConnectorType.JIRA_CONNECTOR, + ) + ) + connector = result.scalars().first() + + if not connector: + await task_logger.log_task_failure( + log_entry, + f"Connector with ID {connector_id} not found", + "Connector not found", + {"error_type": "ConnectorNotFound"}, + ) + return 0, f"Connector with ID {connector_id} not found" + + # Get the Jira credentials from the connector config + jira_email = connector.config.get("JIRA_EMAIL") + jira_api_token = connector.config.get("JIRA_API_TOKEN") + jira_base_url = connector.config.get("JIRA_BASE_URL") + + if not jira_email or not jira_api_token or not jira_base_url: + await task_logger.log_task_failure( + log_entry, + f"Jira credentials not found in connector config for connector {connector_id}", + "Missing Jira credentials", + {"error_type": "MissingCredentials"}, + ) + return 0, "Jira credentials not found in connector config" + + # Initialize Jira client + await task_logger.log_task_progress( + log_entry, + f"Initializing Jira client for connector {connector_id}", + {"stage": "client_initialization"}, + ) + + jira_client = JiraConnector( + base_url=jira_base_url, email=jira_email, api_token=jira_api_token + ) + + # Calculate date range + if start_date is None or end_date is None: + # Fall back to calculating dates based on last_indexed_at + calculated_end_date = datetime.now() + + # Use last_indexed_at as start date if available, otherwise use 365 days ago + if connector.last_indexed_at: + # Convert dates to be comparable (both timezone-naive) + last_indexed_naive = ( + connector.last_indexed_at.replace(tzinfo=None) + if connector.last_indexed_at.tzinfo + else connector.last_indexed_at + ) + + # Check if last_indexed_at is in the future or after end_date + if last_indexed_naive > calculated_end_date: + logger.warning( + f"Last indexed date ({last_indexed_naive.strftime('%Y-%m-%d')}) is in the future. Using 365 days ago instead." 
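+                        # (last_indexed_at was made timezone-naive above and
+                        # datetime.now() is naive already, so the comparison
+                        # between them cannot raise a naive/aware TypeError)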
+                    )
+                    calculated_start_date = calculated_end_date - timedelta(days=365)
+                else:
+                    calculated_start_date = last_indexed_naive
+                    logger.info(
+                        f"Using last_indexed_at ({calculated_start_date.strftime('%Y-%m-%d')}) as start date"
+                    )
+            else:
+                calculated_start_date = calculated_end_date - timedelta(
+                    days=365
+                )  # Use 365 days as the default look-back window
+                logger.info(
+                    f"No last_indexed_at found, using {calculated_start_date.strftime('%Y-%m-%d')} (365 days ago) as start date"
+                )
+
+            # Use calculated dates if not provided
+            start_date_str = (
+                start_date if start_date else calculated_start_date.strftime("%Y-%m-%d")
+            )
+            end_date_str = (
+                end_date if end_date else calculated_end_date.strftime("%Y-%m-%d")
+            )
+        else:
+            # Use provided dates
+            start_date_str = start_date
+            end_date_str = end_date
+
+        await task_logger.log_task_progress(
+            log_entry,
+            f"Fetching Jira issues from {start_date_str} to {end_date_str}",
+            {
+                "stage": "fetching_issues",
+                "start_date": start_date_str,
+                "end_date": end_date_str,
+            },
+        )
+
+        # Get issues within date range
+        try:
+            issues, error = jira_client.get_issues_by_date_range(
+                start_date=start_date_str, end_date=end_date_str, include_comments=True
+            )
+
+            if error:
+                logger.error(f"Failed to get Jira issues: {error}")
+
+                # Don't treat "No issues found" as an error that should stop indexing
+                if "No issues found" in error:
+                    logger.info(
+                        "No issues found is not a critical error, continuing with update"
+                    )
+                    if update_last_indexed:
+                        connector.last_indexed_at = datetime.now()
+                        await session.commit()
+                        logger.info(
+                            f"Updated last_indexed_at to {connector.last_indexed_at} despite no issues found"
+                        )
+
+                    await task_logger.log_task_success(
+                        log_entry,
+                        f"No Jira issues found in date range {start_date_str} to {end_date_str}",
+                        {"issues_found": 0},
+                    )
+                    return 0, None
+                else:
+                    await task_logger.log_task_failure(
+                        log_entry,
+                        f"Failed to get Jira issues: {error}",
+                        "API Error",
+                        {"error_type": "APIError"},
+                    )
+                    return 0, f"Failed to get Jira issues: {error}"
+
+            logger.info(f"Retrieved {len(issues)} issues from Jira API")
+
+        except Exception as e:
+            logger.error(f"Error fetching Jira issues: {e!s}", exc_info=True)
+            return 0, f"Error fetching Jira issues: {e!s}"
+
+        # Process and index each issue
+        documents_indexed = 0
+        skipped_issues = []
+        documents_skipped = 0
+
+        for issue in issues:
+            try:
+                # Jira REST issues carry a numeric "id", a human-readable "key"
+                # (e.g. PROJ-123), and the summary under fields.summary
+                issue_id = issue.get("id")
+                issue_identifier = issue.get("key", "")
+                issue_title = issue.get("fields", {}).get("summary", "")
+
+                if not issue_id or not issue_title:
+                    logger.warning(
+                        f"Skipping issue with missing ID or title: {issue_id or 'Unknown'}"
+                    )
+                    skipped_issues.append(
+                        f"{issue_identifier or 'Unknown'} (missing data)"
+                    )
+                    documents_skipped += 1
+                    continue
+
+                # Format the issue for better readability
+                formatted_issue = jira_client.format_issue(issue)
+
+                # Convert to markdown
+                issue_content = jira_client.format_issue_to_markdown(formatted_issue)
+
+                if not issue_content:
+                    logger.warning(
+                        f"Skipping issue with no content: {issue_identifier} - {issue_title}"
+                    )
+                    skipped_issues.append(f"{issue_identifier} (no content)")
+                    documents_skipped += 1
+                    continue
+
+                # Create a simple summary
+                summary_content = f"Jira Issue {issue_identifier}: {issue_title}\n\nStatus: {formatted_issue.get('status', 'Unknown')}\n\n"
+                if formatted_issue.get("description"):
+                    summary_content += (
+                        f"Description: {formatted_issue.get('description')}\n\n"
+                    )
+
+                # Add comment count
+                comment_count = len(formatted_issue.get("comments", []))
+                summary_content += f"Comments: {comment_count}"
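+                # The content hash below is what makes re-indexing idempotent:
+                # identical issue content in the same search space hashes to the
+                # same value, so unchanged issues are skipped, not duplicated.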
+
+                # Generate content hash
+                content_hash = generate_content_hash(issue_content, search_space_id)
+
+                # Check if document already exists
+                existing_doc_by_hash_result = await session.execute(
+                    select(Document).where(Document.content_hash == content_hash)
+                )
+                existing_document_by_hash = (
+                    existing_doc_by_hash_result.scalars().first()
+                )
+
+                if existing_document_by_hash:
+                    logger.info(
+                        f"Document with content hash {content_hash} already exists for issue {issue_identifier}. Skipping processing."
+                    )
+                    documents_skipped += 1
+                    continue
+
+                # Generate embedding for the summary
+                summary_embedding = config.embedding_model_instance.embed(
+                    summary_content
+                )
+
+                # Process chunks - using the full issue content with comments
+                chunks = [
+                    Chunk(
+                        content=chunk.text,
+                        embedding=config.embedding_model_instance.embed(chunk.text),
+                    )
+                    for chunk in config.chunker_instance.chunk(issue_content)
+                ]
+
+                # Create and store new document
+                logger.info(
+                    f"Creating new document for issue {issue_identifier} - {issue_title}"
+                )
+                document = Document(
+                    search_space_id=search_space_id,
+                    title=f"Jira - {issue_identifier}: {issue_title}",
+                    document_type=DocumentType.JIRA_CONNECTOR,
+                    document_metadata={
+                        "issue_id": issue_id,
+                        "issue_identifier": issue_identifier,
+                        "issue_title": issue_title,
+                        "state": formatted_issue.get("status", "Unknown"),
+                        "comment_count": comment_count,
+                        "indexed_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+                    },
+                    content=summary_content,
+                    content_hash=content_hash,
+                    embedding=summary_embedding,
+                    chunks=chunks,
+                )
+
+                session.add(document)
+                documents_indexed += 1
+                logger.info(
+                    f"Successfully indexed new issue {issue_identifier} - {issue_title}"
+                )
+
+            except Exception as e:
+                logger.error(
+                    f"Error processing issue {issue.get('key', 'Unknown')}: {e!s}",
+                    exc_info=True,
+                )
+                skipped_issues.append(
+                    f"{issue.get('key', 'Unknown')} (processing error)"
+                )
+                documents_skipped += 1
+                continue  # Skip this issue and continue with others
+
+        # Update the last_indexed_at timestamp for the connector only if requested
+        total_processed = documents_indexed
+        if update_last_indexed:
+            connector.last_indexed_at = datetime.now()
+            logger.info(f"Updated last_indexed_at to {connector.last_indexed_at}")
+
+        # Commit all changes
+        await session.commit()
+        logger.info("Successfully committed all JIRA document changes to database")
+
+        # Log success
+        await task_logger.log_task_success(
+            log_entry,
+            f"Successfully completed JIRA indexing for connector {connector_id}",
+            {
+                "issues_processed": total_processed,
+                "documents_indexed": documents_indexed,
+                "documents_skipped": documents_skipped,
+                "skipped_issues_count": len(skipped_issues),
+            },
+        )
+
+        logger.info(
+            f"JIRA indexing completed: {documents_indexed} new issues, {documents_skipped} skipped"
+        )
+        return (
+            total_processed,
+            None,
+        )  # Return None as the error message to indicate success
+
+    except SQLAlchemyError as db_error:
+        await session.rollback()
+        await task_logger.log_task_failure(
+            log_entry,
+            f"Database error during JIRA indexing for connector {connector_id}",
+            str(db_error),
+            {"error_type": "SQLAlchemyError"},
+        )
+        logger.error(f"Database error: {db_error!s}", exc_info=True)
+        return 0, f"Database error: {db_error!s}"
+    except Exception as e:
+        await session.rollback()
+        await task_logger.log_task_failure(
+            log_entry,
+            f"Failed to index JIRA issues for connector {connector_id}",
+            str(e),
+            {"error_type": type(e).__name__},
+        )
+        logger.error(f"Failed to 
index JIRA issues: {e!s}", exc_info=True) + return 0, f"Failed to index JIRA issues: {e!s}" diff --git a/surfsense_web/app/dashboard/[search_space_id]/connectors/[connector_id]/edit/page.tsx b/surfsense_web/app/dashboard/[search_space_id]/connectors/[connector_id]/edit/page.tsx index 34db58f..918a625 100644 --- a/surfsense_web/app/dashboard/[search_space_id]/connectors/[connector_id]/edit/page.tsx +++ b/surfsense_web/app/dashboard/[search_space_id]/connectors/[connector_id]/edit/page.tsx @@ -9,12 +9,12 @@ import { ArrowLeft, Check, Loader2, Github } from "lucide-react"; import { Form } from "@/components/ui/form"; import { Button } from "@/components/ui/button"; import { - Card, - CardContent, - CardDescription, - CardFooter, - CardHeader, - CardTitle, + Card, + CardContent, + CardDescription, + CardFooter, + CardHeader, + CardTitle, } from "@/components/ui/card"; // Import Utils, Types, Hook, and Components @@ -27,201 +27,227 @@ import { EditSimpleTokenForm } from "@/components/editConnector/EditSimpleTokenF import { getConnectorIcon } from "@/components/chat"; export default function EditConnectorPage() { - const router = useRouter(); - const params = useParams(); - const searchSpaceId = params.search_space_id as string; - // Ensure connectorId is parsed safely - const connectorIdParam = params.connector_id as string; - const connectorId = connectorIdParam ? parseInt(connectorIdParam, 10) : NaN; + const router = useRouter(); + const params = useParams(); + const searchSpaceId = params.search_space_id as string; + // Ensure connectorId is parsed safely + const connectorIdParam = params.connector_id as string; + const connectorId = connectorIdParam ? parseInt(connectorIdParam, 10) : NaN; - // Use the custom hook to manage state and logic - const { - connectorsLoading, - connector, - isSaving, - editForm, - patForm, // Needed for GitHub child component - handleSaveChanges, - // GitHub specific props for the child component - editMode, - setEditMode, // Pass down if needed by GitHub component - originalPat, - currentSelectedRepos, - fetchedRepos, - setFetchedRepos, - newSelectedRepos, - setNewSelectedRepos, - isFetchingRepos, - handleFetchRepositories, - handleRepoSelectionChange, - } = useConnectorEditPage(connectorId, searchSpaceId); + // Use the custom hook to manage state and logic + const { + connectorsLoading, + connector, + isSaving, + editForm, + patForm, // Needed for GitHub child component + handleSaveChanges, + // GitHub specific props for the child component + editMode, + setEditMode, // Pass down if needed by GitHub component + originalPat, + currentSelectedRepos, + fetchedRepos, + setFetchedRepos, + newSelectedRepos, + setNewSelectedRepos, + isFetchingRepos, + handleFetchRepositories, + handleRepoSelectionChange, + } = useConnectorEditPage(connectorId, searchSpaceId); - // Redirect if connectorId is not a valid number after parsing - useEffect(() => { - if (isNaN(connectorId)) { - toast.error("Invalid Connector ID."); - router.push(`/dashboard/${searchSpaceId}/connectors`); - } - }, [connectorId, router, searchSpaceId]); + // Redirect if connectorId is not a valid number after parsing + useEffect(() => { + if (isNaN(connectorId)) { + toast.error("Invalid Connector ID."); + router.push(`/dashboard/${searchSpaceId}/connectors`); + } + }, [connectorId, router, searchSpaceId]); - // Loading State - if (connectorsLoading || !connector) { - // Handle NaN case before showing skeleton - if (isNaN(connectorId)) return null; - return ; - } + // Loading State + if (connectorsLoading || 
!connector) { + // Handle NaN case before showing skeleton + if (isNaN(connectorId)) return null; + return ; + } - // Main Render using data/handlers from the hook - return ( -
- + // Main Render using data/handlers from the hook + return ( +
+ - - - - - {getConnectorIcon(connector.connector_type)} - Edit {getConnectorTypeDisplay(connector.connector_type)} Connector - - - Modify connector name and configuration. - - + + + + + {getConnectorIcon(connector.connector_type)} + Edit {getConnectorTypeDisplay(connector.connector_type)} Connector + + + Modify connector name and configuration. + + -
- {/* Pass hook's handleSaveChanges */} - - - {/* Pass form control from hook */} - + + {/* Pass hook's handleSaveChanges */} + + + {/* Pass form control from hook */} + -
+
-

Configuration

+

Configuration

- {/* == GitHub == */} - {connector.connector_type === "GITHUB_CONNECTOR" && ( - - )} + {/* == GitHub == */} + {connector.connector_type === "GITHUB_CONNECTOR" && ( + + )} - {/* == Slack == */} - {connector.connector_type === "SLACK_CONNECTOR" && ( - - )} - {/* == Notion == */} - {connector.connector_type === "NOTION_CONNECTOR" && ( - - )} - {/* == Serper == */} - {connector.connector_type === "SERPER_API" && ( - - )} - {/* == Tavily == */} - {connector.connector_type === "TAVILY_API" && ( - - )} + {/* == Slack == */} + {connector.connector_type === "SLACK_CONNECTOR" && ( + + )} + {/* == Notion == */} + {connector.connector_type === "NOTION_CONNECTOR" && ( + + )} + {/* == Serper == */} + {connector.connector_type === "SERPER_API" && ( + + )} + {/* == Tavily == */} + {connector.connector_type === "TAVILY_API" && ( + + )} - {/* == Linear == */} - {connector.connector_type === "LINEAR_CONNECTOR" && ( - - )} + {/* == Linear == */} + {connector.connector_type === "LINEAR_CONNECTOR" && ( + + )} - {/* == Linkup == */} - {connector.connector_type === "LINKUP_API" && ( - - )} + {/* == Jira == */} + {connector.connector_type === "JIRA_CONNECTOR" && ( +
+ + + +
+ )} - {/* == Discord == */} - {connector.connector_type === "DISCORD_CONNECTOR" && ( - - )} + {/* == Linkup == */} + {connector.connector_type === "LINKUP_API" && ( + + )} -
- - - - - -
-
-
- ); + {/* == Discord == */} + {connector.connector_type === "DISCORD_CONNECTOR" && ( + + )} + + + + + + + + +
+ ); } diff --git a/surfsense_web/app/dashboard/[search_space_id]/connectors/[connector_id]/page.tsx b/surfsense_web/app/dashboard/[search_space_id]/connectors/[connector_id]/page.tsx index 8986444..9ed3f94 100644 --- a/surfsense_web/app/dashboard/[search_space_id]/connectors/[connector_id]/page.tsx +++ b/surfsense_web/app/dashboard/[search_space_id]/connectors/[connector_id]/page.tsx @@ -9,7 +9,10 @@ import * as z from "zod"; import { toast } from "sonner"; import { ArrowLeft, Check, Info, Loader2 } from "lucide-react"; -import { useSearchSourceConnectors, SearchSourceConnector } from "@/hooks/useSearchSourceConnectors"; +import { + useSearchSourceConnectors, + SearchSourceConnector, +} from "@/hooks/useSearchSourceConnectors"; import { Form, FormControl, @@ -28,11 +31,7 @@ import { CardHeader, CardTitle, } from "@/components/ui/card"; -import { - Alert, - AlertDescription, - AlertTitle, -} from "@/components/ui/alert"; +import { Alert, AlertDescription, AlertTitle } from "@/components/ui/alert"; // Define the form schema with Zod const apiConnectorFormSchema = z.object({ @@ -47,13 +46,15 @@ const apiConnectorFormSchema = z.object({ // Helper function to get connector type display name const getConnectorTypeDisplay = (type: string): string => { const typeMap: Record = { - "SERPER_API": "Serper API", - "TAVILY_API": "Tavily API", - "SLACK_CONNECTOR": "Slack Connector", - "NOTION_CONNECTOR": "Notion Connector", - "GITHUB_CONNECTOR": "GitHub Connector", - "DISCORD_CONNECTOR": "Discord Connector", - "LINKUP_API": "Linkup", + SERPER_API: "Serper API", + TAVILY_API: "Tavily API", + SLACK_CONNECTOR: "Slack Connector", + NOTION_CONNECTOR: "Notion Connector", + GITHUB_CONNECTOR: "GitHub Connector", + LINEAR_CONNECTOR: "Linear Connector", + JIRA_CONNECTOR: "Jira Connector", + DISCORD_CONNECTOR: "Discord Connector", + LINKUP_API: "Linkup", // Add other connector types here as needed }; return typeMap[type] || type; @@ -67,9 +68,11 @@ export default function EditConnectorPage() { const params = useParams(); const searchSpaceId = params.search_space_id as string; const connectorId = parseInt(params.connector_id as string, 10); - + const { connectors, updateConnector } = useSearchSourceConnectors(); - const [connector, setConnector] = useState(null); + const [connector, setConnector] = useState( + null, + ); const [isLoading, setIsLoading] = useState(true); const [isSubmitting, setIsSubmitting] = useState(false); // console.log("connector", connector); @@ -85,24 +88,24 @@ export default function EditConnectorPage() { // Get API key field name based on connector type const getApiKeyFieldName = (connectorType: string): string => { const fieldMap: Record = { - "SERPER_API": "SERPER_API_KEY", - "TAVILY_API": "TAVILY_API_KEY", - "SLACK_CONNECTOR": "SLACK_BOT_TOKEN", - "NOTION_CONNECTOR": "NOTION_INTEGRATION_TOKEN", - "GITHUB_CONNECTOR": "GITHUB_PAT", - "DISCORD_CONNECTOR": "DISCORD_BOT_TOKEN", - "LINKUP_API": "LINKUP_API_KEY" + SERPER_API: "SERPER_API_KEY", + TAVILY_API: "TAVILY_API_KEY", + SLACK_CONNECTOR: "SLACK_BOT_TOKEN", + NOTION_CONNECTOR: "NOTION_INTEGRATION_TOKEN", + GITHUB_CONNECTOR: "GITHUB_PAT", + DISCORD_CONNECTOR: "DISCORD_BOT_TOKEN", + LINKUP_API: "LINKUP_API_KEY", }; return fieldMap[connectorType] || ""; }; // Find connector in the list useEffect(() => { - const currentConnector = connectors.find(c => c.id === connectorId); - + const currentConnector = connectors.find((c) => c.id === connectorId); + if (currentConnector) { setConnector(currentConnector); - + // Check if connector type is 
supported const apiKeyField = getApiKeyFieldName(currentConnector.connector_type); if (apiKeyField) { @@ -115,7 +118,7 @@ export default function EditConnectorPage() { toast.error("This connector type is not supported for editing"); router.push(`/dashboard/${searchSpaceId}/connectors`); } - + setIsLoading(false); } else if (!isLoading && connectors.length > 0) { // If connectors are loaded but this one isn't found @@ -127,11 +130,11 @@ export default function EditConnectorPage() { // Handle form submission const onSubmit = async (values: ApiConnectorFormValues) => { if (!connector) return; - + setIsSubmitting(true); try { const apiKeyField = getApiKeyFieldName(connector.connector_type); - + // Only update the API key if a new one was provided const updatedConfig = { ...connector.config }; if (values.api_key) { @@ -150,7 +153,9 @@ export default function EditConnectorPage() { router.push(`/dashboard/${searchSpaceId}/connectors`); } catch (error) { console.error("Error updating connector:", error); - toast.error(error instanceof Error ? error.message : "Failed to update connector"); + toast.error( + error instanceof Error ? error.message : "Failed to update connector", + ); } finally { setIsSubmitting(false); } @@ -186,24 +191,30 @@ export default function EditConnectorPage() { - Edit {connector ? getConnectorTypeDisplay(connector.connector_type) : ""} Connector + Edit{" "} + {connector + ? getConnectorTypeDisplay(connector.connector_type) + : ""}{" "} + Connector - - Update your connector settings. - + Update your connector settings. API Key Security - Your API key is stored securely. For security reasons, we don't display your existing API key. - If you don't update the API key field, your existing key will be preserved. + Your API key is stored securely. For security reasons, we don't + display your existing API key. If you don't update the API key + field, your existing key will be preserved.
- + ( - {connector?.connector_type === "SLACK_CONNECTOR" - ? "Slack Bot Token" - : connector?.connector_type === "NOTION_CONNECTOR" - ? "Notion Integration Token" + {connector?.connector_type === "SLACK_CONNECTOR" + ? "Slack Bot Token" + : connector?.connector_type === "NOTION_CONNECTOR" + ? "Notion Integration Token" : connector?.connector_type === "GITHUB_CONNECTOR" ? "GitHub Personal Access Token (PAT)" : connector?.connector_type === "LINKUP_API" @@ -238,27 +249,28 @@ export default function EditConnectorPage() { : "API Key"} - - {connector?.connector_type === "SLACK_CONNECTOR" - ? "Enter a new Slack Bot Token or leave blank to keep your existing token." - : connector?.connector_type === "NOTION_CONNECTOR" - ? "Enter a new Notion Integration Token or leave blank to keep your existing token." + {connector?.connector_type === "SLACK_CONNECTOR" + ? "Enter a new Slack Bot Token or leave blank to keep your existing token." + : connector?.connector_type === "NOTION_CONNECTOR" + ? "Enter a new Notion Integration Token or leave blank to keep your existing token." : connector?.connector_type === "GITHUB_CONNECTOR" ? "Enter a new GitHub PAT or leave blank to keep your existing token." : connector?.connector_type === "LINKUP_API" @@ -271,8 +283,8 @@ export default function EditConnectorPage() { />
-
); -} +} diff --git a/surfsense_web/app/dashboard/[search_space_id]/connectors/add/jira-connector/page.tsx b/surfsense_web/app/dashboard/[search_space_id]/connectors/add/jira-connector/page.tsx new file mode 100644 index 0000000..23e128f --- /dev/null +++ b/surfsense_web/app/dashboard/[search_space_id]/connectors/add/jira-connector/page.tsx @@ -0,0 +1,472 @@ +"use client"; + +import { useState } from "react"; +import { useRouter, useParams } from "next/navigation"; +import { motion } from "framer-motion"; +import { zodResolver } from "@hookform/resolvers/zod"; +import { useForm } from "react-hook-form"; +import * as z from "zod"; +import { toast } from "sonner"; +import { ArrowLeft, Check, Info, Loader2 } from "lucide-react"; + +import { useSearchSourceConnectors } from "@/hooks/useSearchSourceConnectors"; +import { + Form, + FormControl, + FormDescription, + FormField, + FormItem, + FormLabel, + FormMessage, +} from "@/components/ui/form"; +import { Input } from "@/components/ui/input"; +import { Button } from "@/components/ui/button"; +import { + Card, + CardContent, + CardDescription, + CardFooter, + CardHeader, + CardTitle, +} from "@/components/ui/card"; +import { Alert, AlertDescription, AlertTitle } from "@/components/ui/alert"; +import { + Accordion, + AccordionContent, + AccordionItem, + AccordionTrigger, +} from "@/components/ui/accordion"; +import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"; + +// Define the form schema with Zod +const jiraConnectorFormSchema = z.object({ + name: z.string().min(3, { + message: "Connector name must be at least 3 characters.", + }), + base_url: z + .string() + .url({ + message: + "Please enter a valid Jira URL (e.g., https://yourcompany.atlassian.net)", + }) + .refine( + (url) => { + return url.includes("atlassian.net") || url.includes("jira"); + }, + { + message: "Please enter a valid Jira instance URL", + }, + ), + email: z.string().email({ + message: "Please enter a valid email address.", + }), + api_token: z.string().min(10, { + message: "Jira API Token is required and must be valid.", + }), +}); + +// Define the type for the form values +type JiraConnectorFormValues = z.infer; + +export default function JiraConnectorPage() { + const router = useRouter(); + const params = useParams(); + const searchSpaceId = params.search_space_id as string; + const [isSubmitting, setIsSubmitting] = useState(false); + const { createConnector } = useSearchSourceConnectors(); + + // Initialize the form + const form = useForm({ + resolver: zodResolver(jiraConnectorFormSchema), + defaultValues: { + name: "Jira Connector", + base_url: "", + email: "", + api_token: "", + }, + }); + + // Handle form submission + const onSubmit = async (values: JiraConnectorFormValues) => { + setIsSubmitting(true); + try { + await createConnector({ + name: values.name, + connector_type: "JIRA_CONNECTOR", + config: { + JIRA_BASE_URL: values.base_url, + JIRA_EMAIL: values.email, + JIRA_API_TOKEN: values.api_token, + }, + is_indexable: true, + last_indexed_at: null, + }); + + toast.success("Jira connector created successfully!"); + + // Navigate back to connectors page + router.push(`/dashboard/${searchSpaceId}/connectors`); + } catch (error) { + console.error("Error creating connector:", error); + toast.error( + error instanceof Error ? error.message : "Failed to create connector", + ); + } finally { + setIsSubmitting(false); + } + }; + + return ( +
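+		// NOTE: the config keys posted above (JIRA_BASE_URL, JIRA_EMAIL,
+		// JIRA_API_TOKEN) must match what the backend indexing task reads
+		// via connector.config.get(...) in index_jira_issues.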
+ + + + + + Connect + Documentation + + + + + + + Connect Jira Instance + + + Integrate with Jira to search and retrieve information from + your issues, tickets, and comments. This connector can index + your Jira content for search. + + + + + + Jira Personal Access Token Required + + You'll need a Jira Personal Access Token to use this + connector. You can create one from{" "} + + Atlassian Account Settings + + + + + + + ( + + Connector Name + + + + + A friendly name to identify this connector. + + + + )} + /> + + ( + + Jira Instance URL + + + + + Your Jira instance URL. For Atlassian Cloud, this is + typically https://yourcompany.atlassian.net + + + + )} + /> + + ( + + Email Address + + + + + Your Atlassian account email address. + + + + )} + /> + + ( + + API Token + + + + + Your Jira API Token will be encrypted and stored securely. + + + + )} + /> + +
+ +
+ + +
+ +

+ What you get with Jira integration: +

+
+   • Search through all your Jira issues and tickets
+   • Access issue descriptions, comments, and full discussion threads
+   • Connect your team's project management directly to your search space
+   • Keep your search results up-to-date with the latest Jira content
+   • Index your Jira issues for enhanced search capabilities
+   • Search by issue keys, status, priority, and assignee information
+
+
+
+ + + + + + Jira Connector Documentation + + + Learn how to set up and use the Jira connector to index your + project management data. + + + +
+

How it works

+

+ The Jira connector uses the Jira REST API with Basic Authentication + to fetch all issues and comments that your account has + access to within your Jira instance. +

+
+   • On follow-up indexing runs, the connector retrieves only issues and comments that have been updated since the last indexing attempt.
+   • Indexing is configured to run periodically, so updates should appear in your search results within minutes.
+
+ + + + + Authorization + + + + + Read-Only Access is Sufficient + + You only need read access for this connector to work. + The API Token will only be used to read your Jira data. + + + +
+
+

+ Step 1: Create an API Token +

+
+   1. Log in to your Atlassian account
+   2. Navigate to https://id.atlassian.com/manage-profile/security/api-tokens
+   3. Click Create API token
+   4. Enter a label for your token (like "SurfSense Connector")
+   5. Click Create
+   6. Copy the generated token, as it will only be shown once
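Before saving the connector, the email/token pair can be sanity-checked against the standard Jira Cloud `myself` endpoint. A minimal sketch (the URL and credentials are placeholders, and this check is not part of the UI itself):

```python
import requests

# Jira Cloud accepts HTTP Basic auth as (account email, API token).
resp = requests.get(
    "https://yourcompany.atlassian.net/rest/api/2/myself",
    auth=("you@example.com", "your-api-token"),
    timeout=30,
)
resp.raise_for_status()  # a 401 here means the email/token pair is rejected
print(resp.json().get("displayName"))
```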
+
+ +
+

+ Step 2: Grant necessary access +

+

+ The API Token will have access to all projects and + issues that your user account can see. Make sure your + account has appropriate permissions for the projects + you want to index. +

+ + + Data Privacy + + Only issues, comments, and basic metadata will be + indexed. Jira attachments and linked files are not + indexed by this connector. + + +
+
+
+
+ + + + Indexing + + +
+   1. Navigate to the Connector Dashboard and select the Jira Connector.
+   2. Enter your Jira Instance URL (e.g., https://yourcompany.atlassian.net).
+   3. Enter your email address and API Token in the form fields.
+   4. Click Connect to establish the connection.
+   5. Once connected, your Jira issues will be indexed automatically.
+ + + + What Gets Indexed + +

+ The Jira connector indexes the following data: +

+
+   • Issue keys and summaries (e.g., PROJ-123)
+   • Issue descriptions
+   • Issue comments and discussion threads
+   • Issue status, priority, and type information
+   • Assignee and reporter information
+   • Project information
+
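The backend calls `jira_client.get_issues_by_date_range(...)`, but the `JiraConnector` class itself is not part of this patch. As a rough sketch of what such a fetch could look like against the standard Jira REST search endpoint (the function name, JQL string, and field list below are illustrative assumptions, not the actual connector code):

```python
import requests


def fetch_issues_by_date_range(
    base_url: str,
    email: str,
    api_token: str,
    start_date: str,  # "YYYY-MM-DD"
    end_date: str,  # "YYYY-MM-DD"
) -> list[dict]:
    """Page through /rest/api/2/search for issues updated in the window."""
    jql = (
        f'updated >= "{start_date}" AND updated <= "{end_date}" '
        "ORDER BY updated ASC"
    )
    issues: list[dict] = []
    start_at = 0
    while True:
        resp = requests.get(
            f"{base_url}/rest/api/2/search",
            params={
                "jql": jql,
                "startAt": start_at,
                "maxResults": 50,
                # "comment" is a regular issue field, so requesting it returns
                # each issue's comment thread in the same payload
                "fields": "summary,description,status,priority,issuetype,comment",
            },
            auth=(email, api_token),  # Basic auth: Atlassian email + API token
            timeout=30,
        )
        resp.raise_for_status()
        payload = resp.json()
        page = payload.get("issues", [])
        issues.extend(page)
        start_at += len(page)
        if not page or start_at >= payload.get("total", 0):
            break
    return issues
```

Issues returned in this shape would line up with the `issue.get("id")`, `issue.get("key")`, and `fields.summary` access used by `index_jira_issues` above.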
+
+
+
+
+
+
+
+
+
+
+ ); +} diff --git a/surfsense_web/app/dashboard/[search_space_id]/connectors/add/page.tsx b/surfsense_web/app/dashboard/[search_space_id]/connectors/add/page.tsx index afcc0af..3d0e59d 100644 --- a/surfsense_web/app/dashboard/[search_space_id]/connectors/add/page.tsx +++ b/surfsense_web/app/dashboard/[search_space_id]/connectors/add/page.tsx @@ -1,8 +1,17 @@ "use client"; import { Badge } from "@/components/ui/badge"; import { Button } from "@/components/ui/button"; -import { Card, CardContent, CardFooter, CardHeader } from "@/components/ui/card"; -import { Collapsible, CollapsibleContent, CollapsibleTrigger } from "@/components/ui/collapsible"; +import { + Card, + CardContent, + CardFooter, + CardHeader, +} from "@/components/ui/card"; +import { + Collapsible, + CollapsibleContent, + CollapsibleTrigger, +} from "@/components/ui/collapsible"; import { IconBrandDiscord, IconBrandGithub, @@ -67,23 +76,26 @@ const connectorCategories: ConnectorCategory[] = [ { id: "slack-connector", title: "Slack", - description: "Connect to your Slack workspace to access messages and channels.", + description: + "Connect to your Slack workspace to access messages and channels.", icon: , status: "available", }, { id: "ms-teams", title: "Microsoft Teams", - description: "Connect to Microsoft Teams to access your team's conversations.", + description: + "Connect to Microsoft Teams to access your team's conversations.", icon: , status: "coming-soon", }, { id: "discord-connector", title: "Discord", - description: "Connect to Discord servers to access messages and channels.", + description: + "Connect to Discord servers to access messages and channels.", icon: , - status: "available" + status: "available", }, ], }, @@ -94,16 +106,18 @@ const connectorCategories: ConnectorCategory[] = [ { id: "linear-connector", title: "Linear", - description: "Connect to Linear to search issues, comments and project data.", + description: + "Connect to Linear to search issues, comments and project data.", icon: , status: "available", }, { id: "jira-connector", title: "Jira", - description: "Connect to Jira to search issues, tickets and project data.", + description: + "Connect to Jira to search issues, tickets and project data.", icon: , - status: "coming-soon", + status: "available", }, ], }, @@ -114,14 +128,16 @@ const connectorCategories: ConnectorCategory[] = [ { id: "notion-connector", title: "Notion", - description: "Connect to your Notion workspace to access pages and databases.", + description: + "Connect to your Notion workspace to access pages and databases.", icon: , status: "available", }, { id: "github-connector", title: "GitHub", - description: "Connect a GitHub PAT to index code and docs from accessible repositories.", + description: + "Connect a GitHub PAT to index code and docs from accessible repositories.", icon: , status: "available", }, @@ -141,7 +157,8 @@ const connectorCategories: ConnectorCategory[] = [ { id: "zoom", title: "Zoom", - description: "Connect to Zoom to access meeting recordings and transcripts.", + description: + "Connect to Zoom to access meeting recordings and transcripts.", icon: , status: "coming-soon", }, @@ -152,7 +169,7 @@ const connectorCategories: ConnectorCategory[] = [ // Animation variants const fadeIn = { hidden: { opacity: 0 }, - visible: { opacity: 1, transition: { duration: 0.4 } } + visible: { opacity: 1, transition: { duration: 0.4 } }, }; const staggerContainer = { @@ -160,43 +177,49 @@ const staggerContainer = { visible: { opacity: 1, transition: { - staggerChildren: 0.1 - 
} - } + staggerChildren: 0.1, + }, + }, }; const cardVariants = { hidden: { opacity: 0, y: 20 }, - visible: { - opacity: 1, + visible: { + opacity: 1, y: 0, - transition: { + transition: { type: "spring", stiffness: 260, - damping: 20 - } + damping: 20, + }, }, - hover: { + hover: { scale: 1.02, - boxShadow: "0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05)", - transition: { + boxShadow: + "0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05)", + transition: { type: "spring", stiffness: 400, - damping: 10 - } - } + damping: 10, + }, + }, }; export default function ConnectorsPage() { const params = useParams(); const searchSpaceId = params.search_space_id as string; - const [expandedCategories, setExpandedCategories] = useState(["search-engines", "knowledge-bases", "project-management", "team-chats"]); + const [expandedCategories, setExpandedCategories] = useState([ + "search-engines", + "knowledge-bases", + "project-management", + "team-chats", + ]); const toggleCategory = (categoryId: string) => { - setExpandedCategories(prev => - prev.includes(categoryId) - ? prev.filter(id => id !== categoryId) - : [...prev, categoryId] + setExpandedCategories((prev) => + prev.includes(categoryId) + ? prev.filter((id) => id !== categoryId) + : [...prev, categoryId], ); }; @@ -205,9 +228,9 @@ export default function ConnectorsPage() { @@ -215,18 +238,19 @@ export default function ConnectorsPage() { Connect Your Tools

- Integrate with your favorite services to enhance your research capabilities. + Integrate with your favorite services to enhance your research + capabilities.

- {connectorCategories.map((category) => ( -

{category.title}

-
- + -
-

{connector.title}

+

+ {connector.title} +

{connector.status === "coming-soon" && ( - + Coming soon )} {connector.status === "connected" && ( - + Connected )}
- +

{connector.description}

- + - {connector.status === 'available' && ( - - )} - {connector.status === 'coming-soon' && ( - )} - {connector.status === 'connected' && ( - )} diff --git a/surfsense_web/app/dashboard/[search_space_id]/documents/(manage)/page.tsx b/surfsense_web/app/dashboard/[search_space_id]/documents/(manage)/page.tsx index 7d9aa3c..1b66684 100644 --- a/surfsense_web/app/dashboard/[search_space_id]/documents/(manage)/page.tsx +++ b/surfsense_web/app/dashboard/[search_space_id]/documents/(manage)/page.tsx @@ -3,88 +3,111 @@ import { DocumentViewer } from "@/components/document-viewer"; import { JsonMetadataViewer } from "@/components/json-metadata-viewer"; import { - AlertDialog, - AlertDialogAction, - AlertDialogCancel, - AlertDialogContent, - AlertDialogDescription, - AlertDialogFooter, - AlertDialogHeader, - AlertDialogTitle, - AlertDialogTrigger, + AlertDialog, + AlertDialogAction, + AlertDialogCancel, + AlertDialogContent, + AlertDialogDescription, + AlertDialogFooter, + AlertDialogHeader, + AlertDialogTitle, + AlertDialogTrigger, } from "@/components/ui/alert-dialog"; import { Button } from "@/components/ui/button"; import { Checkbox } from "@/components/ui/checkbox"; import { - DropdownMenu, - DropdownMenuCheckboxItem, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuLabel, - DropdownMenuSeparator, - DropdownMenuTrigger, + DropdownMenu, + DropdownMenuCheckboxItem, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuLabel, + DropdownMenuSeparator, + DropdownMenuTrigger, } from "@/components/ui/dropdown-menu"; import { Input } from "@/components/ui/input"; import { Label } from "@/components/ui/label"; -import { Pagination, PaginationContent, PaginationItem } from "@/components/ui/pagination"; -import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover"; import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, + Pagination, + PaginationContent, + PaginationItem, +} from "@/components/ui/pagination"; +import { + Popover, + PopoverContent, + PopoverTrigger, +} from "@/components/ui/popover"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, } from "@/components/ui/select"; import { - Table, - TableBody, - TableCell, - TableHead, - TableHeader, - TableRow, + Table, + TableBody, + TableCell, + TableHead, + TableHeader, + TableRow, } from "@/components/ui/table"; import { useDocuments } from "@/hooks/use-documents"; import { cn } from "@/lib/utils"; -import { IconBrandDiscord, IconBrandGithub, IconBrandNotion, IconBrandSlack, IconBrandYoutube, IconLayoutKanban } from "@tabler/icons-react"; import { - ColumnDef, - ColumnFiltersState, - FilterFn, - PaginationState, - Row, - SortingState, - VisibilityState, - flexRender, - getCoreRowModel, - getFacetedUniqueValues, - getFilteredRowModel, - getPaginationRowModel, - getSortedRowModel, - useReactTable, + IconBrandDiscord, + IconBrandGithub, + IconBrandNotion, + IconBrandSlack, + IconBrandYoutube, + IconLayoutKanban, + IconTicket, +} from "@tabler/icons-react"; +import { + ColumnDef, + ColumnFiltersState, + FilterFn, + PaginationState, + Row, + SortingState, + VisibilityState, + flexRender, + getCoreRowModel, + getFacetedUniqueValues, + getFilteredRowModel, + getPaginationRowModel, + getSortedRowModel, + useReactTable, } from "@tanstack/react-table"; import { AnimatePresence, motion } from "framer-motion"; import { - AlertCircle, - ChevronDown, - ChevronFirst, - ChevronLast, - ChevronLeft, - ChevronRight, - ChevronUp, - CircleAlert, - CircleX, - Columns3, - File, - 
FileX,
-	Filter,
-	Globe,
-	ListFilter,
-	MoreHorizontal,
-	Trash,
-	Webhook
+	AlertCircle,
+	ChevronDown,
+	ChevronFirst,
+	ChevronLast,
+	ChevronLeft,
+	ChevronRight,
+	ChevronUp,
+	CircleAlert,
+	CircleX,
+	Columns3,
+	File,
+	FileX,
+	Filter,
+	Globe,
+	ListFilter,
+	MoreHorizontal,
+	Trash,
+	Webhook,
 } from "lucide-react";
 import { useParams } from "next/navigation";
-import React, { useContext, useEffect, useId, useMemo, useRef, useState } from "react";
+import React, {
+	useContext,
+	useEffect,
+	useId,
+	useMemo,
+	useRef,
+	useState,
+} from "react";
 import ReactMarkdown from "react-markdown";
 import rehypeRaw from "rehype-raw";
 import rehypeSanitize from "rehype-sanitize";
@@ -93,938 +116,1065 @@ import { toast } from "sonner";

 // Define animation variants for reuse
 const fadeInScale = {
-	hidden: { opacity: 0, scale: 0.95 },
-	visible: {
-		opacity: 1,
-		scale: 1,
-		transition: { type: "spring", stiffness: 300, damping: 30 }
-	},
-	exit: {
-		opacity: 0,
-		scale: 0.95,
-		transition: { duration: 0.15 }
-	}
+	hidden: { opacity: 0, scale: 0.95 },
+	visible: {
+		opacity: 1,
+		scale: 1,
+		transition: { type: "spring", stiffness: 300, damping: 30 },
+	},
+	exit: {
+		opacity: 0,
+		scale: 0.95,
+		transition: { duration: 0.15 },
+	},
 };

 type Document = {
-	id: number;
-	title: string;
-	document_type: "EXTENSION" | "CRAWLED_URL" | "SLACK_CONNECTOR" | "NOTION_CONNECTOR" | "FILE" | "YOUTUBE_VIDEO" | "LINEAR_CONNECTOR" | "DISCORD_CONNECTOR";
-	document_metadata: any;
-	content: string;
-	created_at: string;
-	search_space_id: number;
+	id: number;
+	title: string;
+	document_type:
+		| "EXTENSION"
+		| "CRAWLED_URL"
+		| "SLACK_CONNECTOR"
+		| "NOTION_CONNECTOR"
+		| "FILE"
+		| "YOUTUBE_VIDEO"
+		| "GITHUB_CONNECTOR"
+		| "LINEAR_CONNECTOR"
+		| "JIRA_CONNECTOR"
+		| "DISCORD_CONNECTOR";
+	document_metadata: any;
+	content: string;
+	created_at: string;
+	search_space_id: number;
 };

 // Custom filter function for multi-column searching
-const multiColumnFilterFn: FilterFn = (row, columnId, filterValue) => {
-	const searchableRowContent = `${row.original.title}`.toLowerCase();
-	const searchTerm = (filterValue ?? "").toLowerCase();
-	return searchableRowContent.includes(searchTerm);
+const multiColumnFilterFn: FilterFn = (
+	row,
+	columnId,
+	filterValue,
+) => {
+	const searchableRowContent = `${row.original.title}`.toLowerCase();
+	const searchTerm = (filterValue ?? 
"").toLowerCase(); + return searchableRowContent.includes(searchTerm); }; -const statusFilterFn: FilterFn = (row, columnId, filterValue: string[]) => { - if (!filterValue?.length) return true; - const status = row.getValue(columnId) as string; - return filterValue.includes(status); +const statusFilterFn: FilterFn = ( + row, + columnId, + filterValue: string[], +) => { + if (!filterValue?.length) return true; + const status = row.getValue(columnId) as string; + return filterValue.includes(status); }; // Add document type icons mapping const documentTypeIcons = { - EXTENSION: Webhook, - CRAWLED_URL: Globe, - SLACK_CONNECTOR: IconBrandSlack, - NOTION_CONNECTOR: IconBrandNotion, - FILE: File, - YOUTUBE_VIDEO: IconBrandYoutube, - GITHUB_CONNECTOR: IconBrandGithub, - LINEAR_CONNECTOR: IconLayoutKanban, - DISCORD_CONNECTOR: IconBrandDiscord, + EXTENSION: Webhook, + CRAWLED_URL: Globe, + SLACK_CONNECTOR: IconBrandSlack, + NOTION_CONNECTOR: IconBrandNotion, + FILE: File, + YOUTUBE_VIDEO: IconBrandYoutube, + GITHUB_CONNECTOR: IconBrandGithub, + LINEAR_CONNECTOR: IconLayoutKanban, + JIRA_CONNECTOR: IconTicket, + DISCORD_CONNECTOR: IconBrandDiscord, } as const; const columns: ColumnDef[] = [ - { - id: "select", - header: ({ table }) => ( - table.toggleAllPageRowsSelected(!!value)} - aria-label="Select all" - /> - ), - cell: ({ row }) => ( - row.toggleSelected(!!value)} - aria-label="Select row" - /> - ), - size: 28, - enableSorting: false, - enableHiding: false, + { + id: "select", + header: ({ table }) => ( + table.toggleAllPageRowsSelected(!!value)} + aria-label="Select all" + /> + ), + cell: ({ row }) => ( + row.toggleSelected(!!value)} + aria-label="Select row" + /> + ), + size: 28, + enableSorting: false, + enableHiding: false, + }, + { + header: "Title", + accessorKey: "title", + cell: ({ row }) => { + const Icon = documentTypeIcons[row.original.document_type]; + return ( + + + {row.getValue("title")} + + ); }, - { - header: "Title", - accessorKey: "title", - cell: ({ row }) => { - const Icon = documentTypeIcons[row.original.document_type]; - return ( - - - {row.getValue("title")} - - ); - }, - size: 250, + size: 250, + }, + { + header: "Type", + accessorKey: "document_type", + cell: ({ row }) => { + const type = row.getValue( + "document_type", + ) as keyof typeof documentTypeIcons; + const Icon = documentTypeIcons[type]; + return ( +
+
+ +
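+						{/* e.g. "JIRA_CONNECTOR" renders as "Jira Connector":
+						    the expression below splits on "_", keeps each
+						    word's first letter, and lower-cases the rest */}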
+ + {type + .split("_") + .map((word) => word.charAt(0) + word.slice(1).toLowerCase()) + .join(" ")} + +
+ ); }, - { - header: "Type", - accessorKey: "document_type", - cell: ({ row }) => { - const type = row.getValue("document_type") as keyof typeof documentTypeIcons; - const Icon = documentTypeIcons[type]; - return ( -
-
- -
- - {type.split('_').map(word => word.charAt(0) + word.slice(1).toLowerCase()).join(' ')} - -
- ); - }, - size: 180, + size: 180, + }, + { + header: "Content Summary", + accessorKey: "content", + cell: ({ row }) => { + const content = row.getValue("content") as string; + const title = row.getValue("title") as string; + + // Create a truncated preview (first 150 characters) + const previewContent = + content.length > 150 ? content.substring(0, 150) + "..." : content; + + return ( + + ); }, - { - header: "Content Summary", - accessorKey: "content", - cell: ({ row }) => { - const content = row.getValue("content") as string; - const title = row.getValue("title") as string; - - // Create a truncated preview (first 150 characters) - const previewContent = content.length > 150 - ? content.substring(0, 150) + "..." - : content; - - return ( - - ); - }, - size: 300, - }, - { - header: "Created At", - accessorKey: "created_at", - cell: ({ row }) => { - const date = new Date(row.getValue("created_at")); - return date.toLocaleDateString(); - }, - size: 120, - }, - { - id: "actions", - header: () => Actions, - cell: ({ row }) => , - size: 60, - enableHiding: false, + size: 300, + }, + { + header: "Created At", + accessorKey: "created_at", + cell: ({ row }) => { + const date = new Date(row.getValue("created_at")); + return date.toLocaleDateString(); }, + size: 120, + }, + { + id: "actions", + header: () => Actions, + cell: ({ row }) => , + size: 60, + enableHiding: false, + }, ]; // Create a context to share the deleteDocument function const DocumentsContext = React.createContext<{ - deleteDocument: (id: number) => Promise; - refreshDocuments: () => Promise; + deleteDocument: (id: number) => Promise; + refreshDocuments: () => Promise; } | null>(null); export default function DocumentsTable() { - const id = useId(); - const params = useParams(); - const searchSpaceId = Number(params.search_space_id); - const { documents, loading, error, refreshDocuments, deleteDocument } = useDocuments(searchSpaceId); - - // console.log("Search Space ID:", searchSpaceId); - // console.log("Documents loaded:", documents?.length); - - useEffect(() => { - console.log("Delete document function available:", !!deleteDocument); - }, [deleteDocument]); - - const [columnFilters, setColumnFilters] = useState([]); - const [columnVisibility, setColumnVisibility] = useState({}); - const [pagination, setPagination] = useState({ - pageIndex: 0, - pageSize: 10, - }); - const inputRef = useRef(null); + const id = useId(); + const params = useParams(); + const searchSpaceId = Number(params.search_space_id); + const { documents, loading, error, refreshDocuments, deleteDocument } = + useDocuments(searchSpaceId); - const [sorting, setSorting] = useState([ - { - id: "title", - desc: false, - }, - ]); + // console.log("Search Space ID:", searchSpaceId); + // console.log("Documents loaded:", documents?.length); - const [data, setData] = useState([]); - - useEffect(() => { - if (documents) { - setData(documents); - } - }, [documents]); + useEffect(() => { + console.log("Delete document function available:", !!deleteDocument); + }, [deleteDocument]); - const handleDeleteRows = async () => { - const selectedRows = table.getSelectedRowModel().rows; - // console.log("Deleting selected rows:", selectedRows.length); - - if (selectedRows.length === 0) { - toast.error("No rows selected"); - return; - } - - // Create an array of promises for each delete operation - const deletePromises = selectedRows.map(row => { - // console.log("Deleting row with ID:", row.original.id); - return deleteDocument(row.original.id); - }); - - try { - // Execute all 
delete operations - const results = await Promise.all(deletePromises); - // console.log("Delete results:", results); - - // Check if all deletions were successful - const allSuccessful = results.every(result => result === true); - - if (allSuccessful) { - toast.success(`Successfully deleted ${selectedRows.length} document(s)`); - } else { - toast.error("Some documents could not be deleted"); - } - - // Refresh the documents list after all deletions - await refreshDocuments(); - table.resetRowSelection(); - } catch (error: any) { - console.error("Error deleting documents:", error); - toast.error("Error deleting documents"); - } - }; + const [columnFilters, setColumnFilters] = useState([]); + const [columnVisibility, setColumnVisibility] = useState({}); + const [pagination, setPagination] = useState({ + pageIndex: 0, + pageSize: 10, + }); + const inputRef = useRef(null); - const table = useReactTable({ - data, - columns, - getCoreRowModel: getCoreRowModel(), - getSortedRowModel: getSortedRowModel(), - onSortingChange: setSorting, - enableSortingRemoval: false, - getPaginationRowModel: getPaginationRowModel(), - onPaginationChange: setPagination, - onColumnFiltersChange: setColumnFilters, - onColumnVisibilityChange: setColumnVisibility, - getFilteredRowModel: getFilteredRowModel(), - getFacetedUniqueValues: getFacetedUniqueValues(), - state: { - sorting, - pagination, - columnFilters, - columnVisibility, - }, + const [sorting, setSorting] = useState([ + { + id: "title", + desc: false, + }, + ]); + + const [data, setData] = useState([]); + + useEffect(() => { + if (documents) { + setData(documents); + } + }, [documents]); + + const handleDeleteRows = async () => { + const selectedRows = table.getSelectedRowModel().rows; + // console.log("Deleting selected rows:", selectedRows.length); + + if (selectedRows.length === 0) { + toast.error("No rows selected"); + return; + } + + // Create an array of promises for each delete operation + const deletePromises = selectedRows.map((row) => { + // console.log("Deleting row with ID:", row.original.id); + return deleteDocument(row.original.id); }); - // Get unique status values - const uniqueStatusValues = useMemo(() => { - const statusColumn = table.getColumn("document_type"); + try { + // Execute all delete operations + const results = await Promise.all(deletePromises); + // console.log("Delete results:", results); - if (!statusColumn) return []; + // Check if all deletions were successful + const allSuccessful = results.every((result) => result === true); - const values = Array.from(statusColumn.getFacetedUniqueValues().keys()); + if (allSuccessful) { + toast.success( + `Successfully deleted ${selectedRows.length} document(s)`, + ); + } else { + toast.error("Some documents could not be deleted"); + } - return values.sort(); - }, [table.getColumn("document_type")?.getFacetedUniqueValues()]); + // Refresh the documents list after all deletions + await refreshDocuments(); + table.resetRowSelection(); + } catch (error: any) { + console.error("Error deleting documents:", error); + toast.error("Error deleting documents"); + } + }; - // Get counts for each status - const statusCounts = useMemo(() => { - const statusColumn = table.getColumn("document_type"); - if (!statusColumn) return new Map(); - return statusColumn.getFacetedUniqueValues(); - }, [table.getColumn("document_type")?.getFacetedUniqueValues()]); + const table = useReactTable({ + data, + columns, + getCoreRowModel: getCoreRowModel(), + getSortedRowModel: getSortedRowModel(), + onSortingChange: 
setSorting, + enableSortingRemoval: false, + getPaginationRowModel: getPaginationRowModel(), + onPaginationChange: setPagination, + onColumnFiltersChange: setColumnFilters, + onColumnVisibilityChange: setColumnVisibility, + getFilteredRowModel: getFilteredRowModel(), + getFacetedUniqueValues: getFacetedUniqueValues(), + state: { + sorting, + pagination, + columnFilters, + columnVisibility, + }, + }); - const selectedStatuses = useMemo(() => { - const filterValue = table.getColumn("document_type")?.getFilterValue() as string[]; - return filterValue ?? []; - }, [table.getColumn("document_type")?.getFilterValue()]); + // Get unique status values + const uniqueStatusValues = useMemo(() => { + const statusColumn = table.getColumn("document_type"); - const handleStatusChange = (checked: boolean, value: string) => { - const filterValue = table.getColumn("document_type")?.getFilterValue() as string[]; - const newFilterValue = filterValue ? [...filterValue] : []; + if (!statusColumn) return []; - if (checked) { - newFilterValue.push(value); - } else { - const index = newFilterValue.indexOf(value); - if (index > -1) { - newFilterValue.splice(index, 1); - } - } + const values = Array.from(statusColumn.getFacetedUniqueValues().keys()); - table.getColumn("document_type")?.setFilterValue(newFilterValue.length ? newFilterValue : undefined); - }; + return values.sort(); + }, [table.getColumn("document_type")?.getFacetedUniqueValues()]); - return ( - Promise.resolve(false)), - refreshDocuments: refreshDocuments || (() => Promise.resolve()) - }}> + // Get counts for each status + const statusCounts = useMemo(() => { + const statusColumn = table.getColumn("document_type"); + if (!statusColumn) return new Map(); + return statusColumn.getFacetedUniqueValues(); + }, [table.getColumn("document_type")?.getFacetedUniqueValues()]); + + const selectedStatuses = useMemo(() => { + const filterValue = table + .getColumn("document_type") + ?.getFilterValue() as string[]; + return filterValue ?? []; + }, [table.getColumn("document_type")?.getFilterValue()]); + + const handleStatusChange = (checked: boolean, value: string) => { + const filterValue = table + .getColumn("document_type") + ?.getFilterValue() as string[]; + const newFilterValue = filterValue ? [...filterValue] : []; + + if (checked) { + newFilterValue.push(value); + } else { + const index = newFilterValue.indexOf(value); + if (index > -1) { + newFilterValue.splice(index, 1); + } + } + + table + .getColumn("document_type") + ?.setFilterValue(newFilterValue.length ? newFilterValue : undefined); + }; + + return ( + Promise.resolve(false)), + refreshDocuments: refreshDocuments || (() => Promise.resolve()), + }} + > + + {/* Filters */} + +
+ {/* Filter by name or email */} - {/* Filters */} - + table.getColumn("title")?.setFilterValue(e.target.value) + } + placeholder="Filter by title..." + type="text" + aria-label="Filter by title" + /> + + + {Boolean(table.getColumn("title")?.getFilterValue()) && ( + { + table.getColumn("title")?.setFilterValue(""); + if (inputRef.current) { + inputRef.current.focus(); + } + }} + initial={{ opacity: 0, rotate: -90 }} + animate={{ opacity: 1, rotate: 0 }} + exit={{ opacity: 0, rotate: 90 }} + whileHover={{ scale: 1.1 }} + whileTap={{ scale: 0.9 }} > -
- {/* Filter by name or email */} - - table.getColumn("title")?.setFilterValue(e.target.value)} - placeholder="Filter by title..." - type="text" - aria-label="Filter by title" - /> - - - {Boolean(table.getColumn("title")?.getFilterValue()) && ( - { - table.getColumn("title")?.setFilterValue(""); - if (inputRef.current) { - inputRef.current.focus(); - } - }} - initial={{ opacity: 0, rotate: -90 }} - animate={{ opacity: 1, rotate: 0 }} - exit={{ opacity: 0, rotate: 90 }} - whileHover={{ scale: 1.1 }} - whileTap={{ scale: 0.9 }} - > - - )} - - {/* Filter by status */} - - - - - - - - -
-
Filters
-
- - {uniqueStatusValues.map((value, i) => ( - - handleStatusChange(checked, value)} - /> - - - ))} - -
-
-
-
-
- {/* Toggle columns visibility */} - - - - - - - - - Toggle columns - {table - .getAllColumns() - .filter((column) => column.getCanHide()) - .map((column) => { - return ( - column.toggleVisibility(!!value)} - onSelect={(event) => event.preventDefault()} - > - {column.id} - - ); - })} - - - +
+
- {/* Table */} - + {loading ? ( +
+
+
+

+ Loading documents... +

+
+
+ ) : error ? ( +
+
+ +

+ Error loading documents +

+ -
-
- ) : data.length === 0 ? ( -
-
- -

No documents found

-
-
- ) : ( - - - {table.getHeaderGroups().map((headerGroup) => ( - - {headerGroup.headers.map((header) => { - return ( - - {header.isPlaceholder ? null : header.column.getCanSort() ? ( -
{ - // Enhanced keyboard handling for sorting - if ( - header.column.getCanSort() && - (e.key === "Enter" || e.key === " ") - ) { - e.preventDefault(); - header.column.getToggleSortingHandler()?.(e); - } - }} - tabIndex={header.column.getCanSort() ? 0 : undefined} - > - {flexRender(header.column.columnDef.header, header.getContext())} - {{ - asc: ( -
- ) : ( - flexRender(header.column.columnDef.header, header.getContext()) - )} -
- ); - })} -
- ))} -
- - - {table.getRowModel().rows?.length ? ( - table.getRowModel().rows.map((row, index) => ( - - {row.getVisibleCells().map((cell) => ( - - {flexRender(cell.column.columnDef.cell, cell.getContext())} - - ))} - - )) - ) : ( - - - No documents found. - - - )} - - -
- )} -
- - {/* Pagination */} -
- {/* Results per page */} - - - - - {/* Page number information */} - -

- - {table.getState().pagination.pageIndex * table.getState().pagination.pageSize + 1}- - {Math.min( - Math.max( - table.getState().pagination.pageIndex * table.getState().pagination.pageSize + - table.getState().pagination.pageSize, - 0, - ), - table.getRowCount(), - )} - {" "} - of {table.getRowCount().toString()} -

-
+ {header.isPlaceholder ? null : header.column.getCanSort() ? ( +
{ + // Enhanced keyboard handling for sorting + if ( + header.column.getCanSort() && + (e.key === "Enter" || e.key === " ") + ) { + e.preventDefault(); + header.column.getToggleSortingHandler()?.(e); + } + }} + tabIndex={ + header.column.getCanSort() ? 0 : undefined + } + > + {flexRender( + header.column.columnDef.header, + header.getContext(), + )} + {{ + asc: ( +
+ ) : ( + flexRender( + header.column.columnDef.header, + header.getContext(), + ) + )} + + ); + })} + + ))} + + + + {table.getRowModel().rows?.length ? ( + table.getRowModel().rows.map((row, index) => ( + + {row.getVisibleCells().map((cell) => ( + + {flexRender( + cell.column.columnDef.cell, + cell.getContext(), + )} + + ))} + + )) + ) : ( + + + No documents found. + + + )} + + + + )} + - {/* Pagination buttons */} -
- - - {/* First page button */} - - - - - - {/* Previous page button */} - - - - - - {/* Next page button */} - - - - - - {/* Last page button */} - - - - - - - -
-
-
- - ); + {/* Pagination */} +
+ {/* Results per page */} + + + + + {/* Page number information */} + +

+ + {table.getState().pagination.pageIndex * + table.getState().pagination.pageSize + + 1} + - + {Math.min( + Math.max( + table.getState().pagination.pageIndex * + table.getState().pagination.pageSize + + table.getState().pagination.pageSize, + 0, + ), + table.getRowCount(), + )} + {" "} + of{" "} + + {table.getRowCount().toString()} + +

+
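The nested `Math.min`/`Math.max` in the page-info span above exists only to clamp a "first-last of total" label to the real row count. Factored out, the arithmetic is easier to audit; a sketch assuming TanStack's zero-based `pageIndex`:

```ts
// "X-Y of Z" label for a zero-based page index.
function pageRangeLabel(pageIndex: number, pageSize: number, rowCount: number): string {
  if (rowCount === 0) return "0 of 0";
  const first = pageIndex * pageSize + 1;
  const last = Math.min(first + pageSize - 1, rowCount);
  return `${first}-${last} of ${rowCount}`;
}

// pageRangeLabel(2, 10, 47) === "21-30 of 47"
```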
+ + {/* Pagination buttons */} +
+ + + {/* First page button */} + + + + + + {/* Previous page button */} + + + + + + {/* Next page button */} + + + + + + {/* Last page button */} + + + + + + + +
+
+ + + ); } function RowActions({ row }: { row: Row }) { - const [isOpen, setIsOpen] = useState(false); - const [isDeleting, setIsDeleting] = useState(false); - const { deleteDocument, refreshDocuments } = useContext(DocumentsContext)!; - const document = row.original; + const [isOpen, setIsOpen] = useState(false); + const [isDeleting, setIsDeleting] = useState(false); + const { deleteDocument, refreshDocuments } = useContext(DocumentsContext)!; + const document = row.original; - const handleDelete = async () => { - setIsDeleting(true); - try { - await deleteDocument(document.id); - toast.success("Document deleted successfully"); - await refreshDocuments(); - } catch (error) { - console.error("Error deleting document:", error); - toast.error("Failed to delete document"); - } finally { - setIsDeleting(false); - setIsOpen(false); - } - }; + const handleDelete = async () => { + setIsDeleting(true); + try { + await deleteDocument(document.id); + toast.success("Document deleted successfully"); + await refreshDocuments(); + } catch (error) { + console.error("Error deleting document:", error); + toast.error("Failed to delete document"); + } finally { + setIsDeleting(false); + setIsOpen(false); + } + }; - return ( -
- - - - - - e.preventDefault()}> - View Metadata - - } - /> - - - - { - e.preventDefault(); - setIsOpen(true); - }} - > - Delete - - - - - Are you sure? - - This action cannot be undone. This will permanently delete the document. - - - - Cancel - { - e.preventDefault(); - handleDelete(); - }} - disabled={isDeleting} - > - {isDeleting ? "Deleting..." : "Delete"} - - - - - - -
- ); + return ( +
+ + + + + + e.preventDefault()}> + View Metadata + + } + /> + + + + { + e.preventDefault(); + setIsOpen(true); + }} + > + Delete + + + + + Are you sure? + + This action cannot be undone. This will permanently delete the + document. + + + + Cancel + { + e.preventDefault(); + handleDelete(); + }} + disabled={isDeleting} + > + {isDeleting ? "Deleting..." : "Delete"} + + + + + + +
+ ); } export { DocumentsTable }; - diff --git a/surfsense_web/app/dashboard/[search_space_id]/researcher/[chat_id]/page.tsx b/surfsense_web/app/dashboard/[search_space_id]/researcher/[chat_id]/page.tsx index 8a0bde7..e92db28 100644 --- a/surfsense_web/app/dashboard/[search_space_id]/researcher/[chat_id]/page.tsx +++ b/surfsense_web/app/dashboard/[search_space_id]/researcher/[chat_id]/page.tsx @@ -1,77 +1,77 @@ "use client"; import React, { - useRef, - useEffect, - useState, - useMemo, - useCallback, + useRef, + useEffect, + useState, + useMemo, + useCallback, } from "react"; import { useChat } from "@ai-sdk/react"; import { useParams } from "next/navigation"; import { - Loader2, - X, - Search, - ExternalLink, - ChevronLeft, - ChevronRight, - Check, - ArrowDown, - CircleUser, - Database, - SendHorizontal, - FileText, - Grid3x3, - FolderOpen, - Upload, - ChevronDown, - Filter, - Brain, - Zap, + Loader2, + X, + Search, + ExternalLink, + ChevronLeft, + ChevronRight, + Check, + ArrowDown, + CircleUser, + Database, + SendHorizontal, + FileText, + Grid3x3, + FolderOpen, + Upload, + ChevronDown, + Filter, + Brain, + Zap, } from "lucide-react"; import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; import { Button } from "@/components/ui/button"; import { Input } from "@/components/ui/input"; import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"; import { - Dialog, - DialogContent, - DialogDescription, - DialogHeader, - DialogTitle, - DialogTrigger, - DialogFooter, + Dialog, + DialogContent, + DialogDescription, + DialogHeader, + DialogTitle, + DialogTrigger, + DialogFooter, } from "@/components/ui/dialog"; import { - DropdownMenu, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuLabel, - DropdownMenuSeparator, - DropdownMenuTrigger, + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuLabel, + DropdownMenuSeparator, + DropdownMenuTrigger, } from "@/components/ui/dropdown-menu"; import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, } from "@/components/ui/select"; import { Badge } from "@/components/ui/badge"; import { Skeleton } from "@/components/ui/skeleton"; import { - ConnectorButton as ConnectorButtonComponent, - getConnectorIcon, - getFilteredSources as getFilteredSourcesUtil, - getPaginatedDialogSources as getPaginatedDialogSourcesUtil, - useScrollToBottom, - updateScrollIndicators as updateScrollIndicatorsUtil, - useScrollIndicators, - scrollTabsLeft as scrollTabsLeftUtil, - scrollTabsRight as scrollTabsRightUtil, - Source, - ResearchMode, - ResearchModeControl, + ConnectorButton as ConnectorButtonComponent, + getConnectorIcon, + getFilteredSources as getFilteredSourcesUtil, + getPaginatedDialogSources as getPaginatedDialogSourcesUtil, + useScrollToBottom, + updateScrollIndicators as updateScrollIndicatorsUtil, + useScrollIndicators, + scrollTabsLeft as scrollTabsLeftUtil, + scrollTabsRight as scrollTabsRightUtil, + Source, + ResearchMode, + ResearchModeControl, } from "@/components/chat"; import { MarkdownViewer } from "@/components/markdown-viewer"; import { Logo } from "@/components/Logo"; @@ -80,446 +80,447 @@ import { useDocuments } from "@/hooks/use-documents"; import { useLLMConfigs, useLLMPreferences } from "@/hooks/use-llm-configs"; interface SourceItem { - id: number; - title: string; - description: string; - url: string; - connectorType?: string; + id: number; + title: string; + description: string; 
+ url: string; + connectorType?: string; } interface ConnectorSource { - id: number; - name: string; - type: string; - sources: SourceItem[]; + id: number; + name: string; + type: string; + sources: SourceItem[]; } type DocumentType = - | "EXTENSION" - | "CRAWLED_URL" - | "SLACK_CONNECTOR" - | "NOTION_CONNECTOR" - | "FILE" - | "YOUTUBE_VIDEO" - | "GITHUB_CONNECTOR" - | "LINEAR_CONNECTOR" - | "DISCORD_CONNECTOR"; + | "EXTENSION" + | "CRAWLED_URL" + | "SLACK_CONNECTOR" + | "NOTION_CONNECTOR" + | "FILE" + | "YOUTUBE_VIDEO" + | "GITHUB_CONNECTOR" + | "LINEAR_CONNECTOR" + | "JIRA_CONNECTOR" + | "DISCORD_CONNECTOR"; /** * Skeleton loader for document items */ const DocumentSkeleton = () => ( -
- -
- - - -
- -
+
+ +
+ + + +
+ +
); /** * Enhanced document type filter dropdown */ const DocumentTypeFilter = ({ - value, - onChange, - counts, + value, + onChange, + counts, }: { - value: DocumentType | "ALL"; - onChange: (value: DocumentType | "ALL") => void; - counts: Record; + value: DocumentType | "ALL"; + onChange: (value: DocumentType | "ALL") => void; + counts: Record; }) => { - const getTypeLabel = (type: DocumentType | "ALL") => { - if (type === "ALL") return "All Types"; - return type - .replace(/_/g, " ") - .toLowerCase() - .replace(/\b\w/g, (l) => l.toUpperCase()); - }; + const getTypeLabel = (type: DocumentType | "ALL") => { + if (type === "ALL") return "All Types"; + return type + .replace(/_/g, " ") + .toLowerCase() + .replace(/\b\w/g, (l) => l.toUpperCase()); + }; - const getTypeIcon = (type: DocumentType | "ALL") => { - if (type === "ALL") return ; - return getConnectorIcon(type); - }; + const getTypeIcon = (type: DocumentType | "ALL") => { + if (type === "ALL") return ; + return getConnectorIcon(type); + }; - return ( - - - - - - Document Types - - {Object.entries(counts).map(([type, count]) => ( - onChange(type as DocumentType | "ALL")} - className="flex items-center justify-between" - > -
- {getTypeIcon(type as DocumentType | "ALL")} - {getTypeLabel(type as DocumentType | "ALL")} -
- - {count} - -
- ))} -
-
- ); + return ( + + + + + + Document Types + + {Object.entries(counts).map(([type, count]) => ( + onChange(type as DocumentType | "ALL")} + className="flex items-center justify-between" + > +
+ {getTypeIcon(type as DocumentType | "ALL")} + {getTypeLabel(type as DocumentType | "ALL")} +
+ + {count} + +
+ ))} +
+
+ ); }; /** * Button that displays selected connectors and opens connector selection dialog */ const ConnectorButton = ({ - selectedConnectors, - onClick, + selectedConnectors, + onClick, }: { - selectedConnectors: string[]; - onClick: () => void; + selectedConnectors: string[]; + onClick: () => void; }) => { - const { connectorSourceItems } = useSearchSourceConnectors(); + const { connectorSourceItems } = useSearchSourceConnectors(); - return ( - - ); + return ( + + ); }; /** * Button that displays selected documents count and opens document selection dialog */ const DocumentSelectorButton = ({ - selectedDocuments, - onClick, - documentsCount, + selectedDocuments, + onClick, + documentsCount, }: { - selectedDocuments: number[]; - onClick: () => void; - documentsCount: number; + selectedDocuments: number[]; + onClick: () => void; + documentsCount: number; }) => { - return ( -
- - {selectedDocuments.length > 0 && ( - - {selectedDocuments.length > 99 ? "99+" : selectedDocuments.length} - - )} - {selectedDocuments.length === 0 && ( - - 0 - - )} -
- ); + return ( +
+ + {selectedDocuments.length > 0 && ( + + {selectedDocuments.length > 99 ? "99+" : selectedDocuments.length} + + )} + {selectedDocuments.length === 0 && ( + + 0 + + )} +
+ ); }; // Create a wrapper component for the sources dialog content const SourcesDialogContent = ({ - connector, - sourceFilter, - expandedSources, - sourcesPage, - setSourcesPage, - setSourceFilter, - setExpandedSources, - isLoadingMore, + connector, + sourceFilter, + expandedSources, + sourcesPage, + setSourcesPage, + setSourceFilter, + setExpandedSources, + isLoadingMore, }: { - connector: any; - sourceFilter: string; - expandedSources: boolean; - sourcesPage: number; - setSourcesPage: React.Dispatch>; - setSourceFilter: React.Dispatch>; - setExpandedSources: React.Dispatch>; - isLoadingMore: boolean; + connector: any; + sourceFilter: string; + expandedSources: boolean; + sourcesPage: number; + setSourcesPage: React.Dispatch>; + setSourceFilter: React.Dispatch>; + setExpandedSources: React.Dispatch>; + isLoadingMore: boolean; }) => { - // Safely access sources with fallbacks - const sources = connector?.sources || []; + // Safely access sources with fallbacks + const sources = connector?.sources || []; - // Safe versions of utility functions - const getFilteredSourcesSafe = () => { - if (!sources.length) return []; - return getFilteredSourcesUtil(connector, sourceFilter); - }; + // Safe versions of utility functions + const getFilteredSourcesSafe = () => { + if (!sources.length) return []; + return getFilteredSourcesUtil(connector, sourceFilter); + }; - const getPaginatedSourcesSafe = () => { - if (!sources.length) return []; - return getPaginatedDialogSourcesUtil( - connector, - sourceFilter, - expandedSources, - sourcesPage, - 5, // SOURCES_PER_PAGE - ); - }; + const getPaginatedSourcesSafe = () => { + if (!sources.length) return []; + return getPaginatedDialogSourcesUtil( + connector, + sourceFilter, + expandedSources, + sourcesPage, + 5, // SOURCES_PER_PAGE + ); + }; - const filteredSources = getFilteredSourcesSafe() || []; - const paginatedSources = getPaginatedSourcesSafe() || []; + const filteredSources = getFilteredSourcesSafe() || []; + const paginatedSources = getPaginatedSourcesSafe() || []; - // Description text - const descriptionText = sourceFilter - ? `Found ${filteredSources.length} sources matching "${sourceFilter}"` - : `Viewing ${paginatedSources.length} of ${sources.length} sources`; + // Description text + const descriptionText = sourceFilter + ? `Found ${filteredSources.length} sources matching "${sourceFilter}"` + : `Viewing ${paginatedSources.length} of ${sources.length} sources`; - if (paginatedSources.length === 0) { - return ( -
- -

No sources found matching "{sourceFilter}"

- -
- ); - } + if (paginatedSources.length === 0) { + return ( +
+ +

No sources found matching "{sourceFilter}"

+ +
+ ); + } - return ( - <> - - - {getConnectorIcon(connector.type)} - {connector.name} Sources - - - {descriptionText} - - + return ( + <> + + + {getConnectorIcon(connector.type)} + {connector.name} Sources + + + {descriptionText} + + -
- - { - setSourceFilter(e.target.value); - setSourcesPage(1); - setExpandedSources(false); - }} - /> - {sourceFilter && ( - - )} -
+
+ + { + setSourceFilter(e.target.value); + setSourcesPage(1); + setExpandedSources(false); + }} + /> + {sourceFilter && ( + + )} +
-
- {paginatedSources.map((source: any, index: number) => ( - -
-
- {getConnectorIcon(connector.type)} -
-
-

{source.title}

-

- {source.description} -

-
- -
-
- ))} +
+ {paginatedSources.map((source: any, index: number) => ( + +
+
+ {getConnectorIcon(connector.type)} +
+
+

{source.title}

+

+ {source.description} +

+
+ +
+
+ ))} - {!expandedSources && - paginatedSources.length < filteredSources.length && ( - - )} + {!expandedSources && + paginatedSources.length < filteredSources.length && ( + + )} - {expandedSources && filteredSources.length > 10 && ( -
- Showing all {filteredSources.length} sources -
- )} -
- - ); + {expandedSources && filteredSources.length > 10 && ( +
+ Showing all {filteredSources.length} sources +
+ )} +
+ + ); }; const ChatPage = () => { - const [token, setToken] = React.useState(null); - const [dialogOpenId, setDialogOpenId] = useState(null); - const [sourcesPage, setSourcesPage] = useState(1); - const [expandedSources, setExpandedSources] = useState(false); - const [canScrollLeft, setCanScrollLeft] = useState(false); - const [canScrollRight, setCanScrollRight] = useState(true); - const [sourceFilter, setSourceFilter] = useState(""); - const tabsListRef = useRef(null); - const [terminalExpanded, setTerminalExpanded] = useState(false); - const [selectedConnectors, setSelectedConnectors] = useState([]); - const [searchMode, setSearchMode] = useState<"DOCUMENTS" | "CHUNKS">( - "DOCUMENTS", - ); - const [researchMode, setResearchMode] = useState("QNA"); - const [currentTime, setCurrentTime] = useState(""); - const [currentDate, setCurrentDate] = useState(""); - const terminalMessagesRef = useRef(null); - const { connectorSourceItems, isLoading: isLoadingConnectors } = - useSearchSourceConnectors(); - const { llmConfigs } = useLLMConfigs(); - const { preferences, updatePreferences } = useLLMPreferences(); + const [token, setToken] = React.useState(null); + const [dialogOpenId, setDialogOpenId] = useState(null); + const [sourcesPage, setSourcesPage] = useState(1); + const [expandedSources, setExpandedSources] = useState(false); + const [canScrollLeft, setCanScrollLeft] = useState(false); + const [canScrollRight, setCanScrollRight] = useState(true); + const [sourceFilter, setSourceFilter] = useState(""); + const tabsListRef = useRef(null); + const [terminalExpanded, setTerminalExpanded] = useState(false); + const [selectedConnectors, setSelectedConnectors] = useState([]); + const [searchMode, setSearchMode] = useState<"DOCUMENTS" | "CHUNKS">( + "DOCUMENTS", + ); + const [researchMode, setResearchMode] = useState("QNA"); + const [currentTime, setCurrentTime] = useState(""); + const [currentDate, setCurrentDate] = useState(""); + const terminalMessagesRef = useRef(null); + const { connectorSourceItems, isLoading: isLoadingConnectors } = + useSearchSourceConnectors(); + const { llmConfigs } = useLLMConfigs(); + const { preferences, updatePreferences } = useLLMPreferences(); - const INITIAL_SOURCES_DISPLAY = 3; + const INITIAL_SOURCES_DISPLAY = 3; - const { search_space_id, chat_id } = useParams(); + const { search_space_id, chat_id } = useParams(); - // Document selection state - const [selectedDocuments, setSelectedDocuments] = useState([]); - const [documentFilter, setDocumentFilter] = useState(""); - const [debouncedDocumentFilter, setDebouncedDocumentFilter] = useState(""); - const [documentTypeFilter, setDocumentTypeFilter] = useState< - DocumentType | "ALL" - >("ALL"); - const [documentsPage, setDocumentsPage] = useState(1); - const [documentsPerPage] = useState(10); - const { - documents, - loading: isLoadingDocuments, - error: documentsError, - } = useDocuments(Number(search_space_id)); + // Document selection state + const [selectedDocuments, setSelectedDocuments] = useState([]); + const [documentFilter, setDocumentFilter] = useState(""); + const [debouncedDocumentFilter, setDebouncedDocumentFilter] = useState(""); + const [documentTypeFilter, setDocumentTypeFilter] = useState< + DocumentType | "ALL" + >("ALL"); + const [documentsPage, setDocumentsPage] = useState(1); + const [documentsPerPage] = useState(10); + const { + documents, + loading: isLoadingDocuments, + error: documentsError, + } = useDocuments(Number(search_space_id)); - // Debounced search effect (proper implementation) - 
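The debounced search effect below is the standard React debounce: a 300 ms `setTimeout` commits the filter text, and the cleanup clears the timer on every keystroke so only the last value lands (with the page reset alongside it). The same pattern generalizes to a hook; `useDebouncedValue` is a hypothetical extraction for illustration, not something this PR adds:

```ts
import { useEffect, useState } from "react";

// Returns `value` once it has been stable for `delayMs`; each change
// restarts the timer, so rapid typing produces a single update.
function useDebouncedValue<T>(value: T, delayMs = 300): T {
  const [debounced, setDebounced] = useState(value);
  useEffect(() => {
    const handler = setTimeout(() => setDebounced(value), delayMs);
    return () => clearTimeout(handler); // cancel on change or unmount
  }, [value, delayMs]);
  return debounced;
}
```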
useEffect(() => { - const handler = setTimeout(() => { - setDebouncedDocumentFilter(documentFilter); - setDocumentsPage(1); // Reset page when search changes - }, 300); + // Debounced search effect (proper implementation) + useEffect(() => { + const handler = setTimeout(() => { + setDebouncedDocumentFilter(documentFilter); + setDocumentsPage(1); // Reset page when search changes + }, 300); - return () => { - clearTimeout(handler); - }; - }, [documentFilter]); + return () => { + clearTimeout(handler); + }; + }, [documentFilter]); - // Memoized filtered and paginated documents - const filteredDocuments = useMemo(() => { - if (!documents) return []; + // Memoized filtered and paginated documents + const filteredDocuments = useMemo(() => { + if (!documents) return []; - return documents.filter((doc) => { - const matchesSearch = - doc.title - .toLowerCase() - .includes(debouncedDocumentFilter.toLowerCase()) || - doc.content - .toLowerCase() - .includes(debouncedDocumentFilter.toLowerCase()); - const matchesType = - documentTypeFilter === "ALL" || - doc.document_type === documentTypeFilter; - return matchesSearch && matchesType; - }); - }, [documents, debouncedDocumentFilter, documentTypeFilter]); + return documents.filter((doc) => { + const matchesSearch = + doc.title + .toLowerCase() + .includes(debouncedDocumentFilter.toLowerCase()) || + doc.content + .toLowerCase() + .includes(debouncedDocumentFilter.toLowerCase()); + const matchesType = + documentTypeFilter === "ALL" || + doc.document_type === documentTypeFilter; + return matchesSearch && matchesType; + }); + }, [documents, debouncedDocumentFilter, documentTypeFilter]); - const paginatedDocuments = useMemo(() => { - const startIndex = (documentsPage - 1) * documentsPerPage; - return filteredDocuments.slice(startIndex, startIndex + documentsPerPage); - }, [filteredDocuments, documentsPage, documentsPerPage]); + const paginatedDocuments = useMemo(() => { + const startIndex = (documentsPage - 1) * documentsPerPage; + return filteredDocuments.slice(startIndex, startIndex + documentsPerPage); + }, [filteredDocuments, documentsPage, documentsPerPage]); - const totalPages = Math.ceil(filteredDocuments.length / documentsPerPage); + const totalPages = Math.ceil(filteredDocuments.length / documentsPerPage); - // Document type counts for filter dropdown - const documentTypeCounts = useMemo(() => { - if (!documents) return {}; + // Document type counts for filter dropdown + const documentTypeCounts = useMemo(() => { + if (!documents) return {}; - const counts: Record = { ALL: documents.length }; - documents.forEach((doc) => { - counts[doc.document_type] = (counts[doc.document_type] || 0) + 1; - }); - return counts; - }, [documents]); + const counts: Record = { ALL: documents.length }; + documents.forEach((doc) => { + counts[doc.document_type] = (counts[doc.document_type] || 0) + 1; + }); + return counts; + }, [documents]); - // Callback to handle document selection - const handleDocumentToggle = useCallback((documentId: number) => { - setSelectedDocuments((prev) => - prev.includes(documentId) - ? prev.filter((id) => id !== documentId) - : [...prev, documentId], - ); - }, []); + // Callback to handle document selection + const handleDocumentToggle = useCallback((documentId: number) => { + setSelectedDocuments((prev) => + prev.includes(documentId) + ? 
prev.filter((id) => id !== documentId) + : [...prev, documentId], + ); + }, []); - // Function to scroll terminal to bottom - const scrollTerminalToBottom = () => { - if (terminalMessagesRef.current) { - terminalMessagesRef.current.scrollTop = - terminalMessagesRef.current.scrollHeight; - } - }; + // Function to scroll terminal to bottom + const scrollTerminalToBottom = () => { + if (terminalMessagesRef.current) { + terminalMessagesRef.current.scrollTop = + terminalMessagesRef.current.scrollHeight; + } + }; - // Get token from localStorage on client side only - React.useEffect(() => { - setToken(localStorage.getItem("surfsense_bearer_token")); - }, []); + // Get token from localStorage on client side only + React.useEffect(() => { + setToken(localStorage.getItem("surfsense_bearer_token")); + }, []); - // Set the current time only on the client side after initial render - useEffect(() => { - setCurrentDate(new Date().toISOString().split("T")[0]); - setCurrentTime(new Date().toTimeString().split(" ")[0]); - }, []); + // Set the current time only on the client side after initial render + useEffect(() => { + setCurrentDate(new Date().toISOString().split("T")[0]); + setCurrentTime(new Date().toTimeString().split(" ")[0]); + }, []); - // Add this CSS to remove input shadow and improve the UI - useEffect(() => { - if (typeof document !== "undefined") { - const style = document.createElement("style"); - style.innerHTML = ` + // Add this CSS to remove input shadow and improve the UI + useEffect(() => { + if (typeof document !== "undefined") { + const style = document.createElement("style"); + style.innerHTML = ` .no-shadow-input { box-shadow: none !important; } @@ -617,825 +618,860 @@ const ChatPage = () => { background: hsl(var(--muted-foreground) / 0.5); } `; - document.head.appendChild(style); + document.head.appendChild(style); - return () => { - document.head.removeChild(style); - }; - } - }, []); + return () => { + document.head.removeChild(style); + }; + } + }, []); - const { - messages, - input, - handleInputChange, - handleSubmit: handleChatSubmit, - status, - setMessages, - } = useChat({ - api: `${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/chat`, - streamProtocol: "data", - headers: { - ...(token && { Authorization: `Bearer ${token}` }), - }, - body: { - data: { - search_space_id: search_space_id, - selected_connectors: selectedConnectors, - research_mode: researchMode, - search_mode: searchMode, - document_ids_to_add_in_context: selectedDocuments, - }, - }, - onError: (error) => { - console.error("Chat error:", error); - // You can add additional error handling here if needed - }, - }); + const { + messages, + input, + handleInputChange, + handleSubmit: handleChatSubmit, + status, + setMessages, + } = useChat({ + api: `${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/chat`, + streamProtocol: "data", + headers: { + ...(token && { Authorization: `Bearer ${token}` }), + }, + body: { + data: { + search_space_id: search_space_id, + selected_connectors: selectedConnectors, + research_mode: researchMode, + search_mode: searchMode, + document_ids_to_add_in_context: selectedDocuments, + }, + }, + onError: (error) => { + console.error("Chat error:", error); + // You can add additional error handling here if needed + }, + }); - // Fetch chat details when component mounts - useEffect(() => { - const fetchChatDetails = async () => { - try { - if (!token) return; // Wait for token to be set + // Fetch chat details when component mounts + useEffect(() => { + const fetchChatDetails = async 
() => { + try { + if (!token) return; // Wait for token to be set - // console.log('Fetching chat details for chat ID:', chat_id); + // console.log('Fetching chat details for chat ID:', chat_id); - const response = await fetch( - `${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/chats/${Number(chat_id)}`, - { - method: "GET", - headers: { - "Content-Type": "application/json", - Authorization: `Bearer ${token}`, - }, - }, - ); + const response = await fetch( + `${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/chats/${Number(chat_id)}`, + { + method: "GET", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${token}`, + }, + }, + ); - if (!response.ok) { - throw new Error( - `Failed to fetch chat details: ${response.statusText}`, - ); - } + if (!response.ok) { + throw new Error( + `Failed to fetch chat details: ${response.statusText}`, + ); + } - const chatData = await response.json(); - // console.log('Chat details fetched:', chatData); + const chatData = await response.json(); + // console.log('Chat details fetched:', chatData); - // Set research mode from chat data - if (chatData.type) { - setResearchMode(chatData.type as ResearchMode); - } + // Set research mode from chat data + if (chatData.type) { + setResearchMode(chatData.type as ResearchMode); + } - // Set connectors from chat data - if ( - chatData.initial_connectors && - Array.isArray(chatData.initial_connectors) - ) { - setSelectedConnectors(chatData.initial_connectors); - } + // Set connectors from chat data + if ( + chatData.initial_connectors && + Array.isArray(chatData.initial_connectors) + ) { + setSelectedConnectors(chatData.initial_connectors); + } - // Set messages from chat data - if (chatData.messages && Array.isArray(chatData.messages)) { - setMessages(chatData.messages); - } - } catch (err) { - console.error("Error fetching chat details:", err); - } - }; + // Set messages from chat data + if (chatData.messages && Array.isArray(chatData.messages)) { + setMessages(chatData.messages); + } + } catch (err) { + console.error("Error fetching chat details:", err); + } + }; - if (token) { - fetchChatDetails(); - } - }, [token, chat_id, setMessages]); + if (token) { + fetchChatDetails(); + } + }, [token, chat_id, setMessages]); - // Update chat when a conversation exchange is complete - useEffect(() => { - const updateChat = async () => { - try { - // Only update when: - // 1. Status is ready (not loading) - // 2. We have messages - // 3. Last message is from assistant (completed response) - if ( - status === "ready" && - messages.length > 0 && - messages[messages.length - 1]?.role === "assistant" - ) { - const token = localStorage.getItem("surfsense_bearer_token"); - if (!token) return; + // Update chat when a conversation exchange is complete + useEffect(() => { + const updateChat = async () => { + try { + // Only update when: + // 1. Status is ready (not loading) + // 2. We have messages + // 3. 
Last message is from assistant (completed response) + if ( + status === "ready" && + messages.length > 0 && + messages[messages.length - 1]?.role === "assistant" + ) { + const token = localStorage.getItem("surfsense_bearer_token"); + if (!token) return; - // Find the first user message to use as title - const userMessages = messages.filter((msg) => msg.role === "user"); - if (userMessages.length === 0) return; + // Find the first user message to use as title + const userMessages = messages.filter((msg) => msg.role === "user"); + if (userMessages.length === 0) return; - // Use the first user message as the title - const title = userMessages[0].content; + // Use the first user message as the title + const title = userMessages[0].content; - // console.log('Updating chat with title:', title); + // console.log('Updating chat with title:', title); - // Update the chat - const response = await fetch( - `${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/chats/${Number(chat_id)}`, - { - method: "PUT", - headers: { - "Content-Type": "application/json", - Authorization: `Bearer ${token}`, - }, - body: JSON.stringify({ - type: researchMode, - title: title, - initial_connectors: selectedConnectors, - messages: messages, - search_space_id: Number(search_space_id), - }), - }, - ); + // Update the chat + const response = await fetch( + `${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/chats/${Number(chat_id)}`, + { + method: "PUT", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${token}`, + }, + body: JSON.stringify({ + type: researchMode, + title: title, + initial_connectors: selectedConnectors, + messages: messages, + search_space_id: Number(search_space_id), + }), + }, + ); - if (!response.ok) { - throw new Error(`Failed to update chat: ${response.statusText}`); - } + if (!response.ok) { + throw new Error(`Failed to update chat: ${response.statusText}`); + } - // console.log('Chat updated successfully'); - } - } catch (err) { - console.error("Error updating chat:", err); - } - }; + // console.log('Chat updated successfully'); + } + } catch (err) { + console.error("Error updating chat:", err); + } + }; - updateChat(); - }, [ - messages, - status, - chat_id, - researchMode, - selectedConnectors, - search_space_id, - ]); + updateChat(); + }, [ + messages, + status, + chat_id, + researchMode, + selectedConnectors, + search_space_id, + ]); - // Check and scroll terminal when terminal info is available - useEffect(() => { - // Modified to trigger during streaming as well (removed status check) - if (messages.length === 0) return; + // Check and scroll terminal when terminal info is available + useEffect(() => { + // Modified to trigger during streaming as well (removed status check) + if (messages.length === 0) return; - // Find the latest assistant message - const assistantMessages = messages.filter( - (msg) => msg.role === "assistant", - ); - if (assistantMessages.length === 0) return; + // Find the latest assistant message + const assistantMessages = messages.filter( + (msg) => msg.role === "assistant", + ); + if (assistantMessages.length === 0) return; - const latestAssistantMessage = - assistantMessages[assistantMessages.length - 1]; - if (!latestAssistantMessage?.annotations) return; + const latestAssistantMessage = + assistantMessages[assistantMessages.length - 1]; + if (!latestAssistantMessage?.annotations) return; - // Check for terminal info annotations - const annotations = latestAssistantMessage.annotations as any[]; - const terminalInfoAnnotations = 
annotations.filter( - (a) => a.type === "TERMINAL_INFO", - ); + // Check for terminal info annotations + const annotations = latestAssistantMessage.annotations as any[]; + const terminalInfoAnnotations = annotations.filter( + (a) => a.type === "TERMINAL_INFO", + ); - if (terminalInfoAnnotations.length > 0) { - // Always scroll to bottom when terminal info is updated, even during streaming - scrollTerminalToBottom(); - } - }, [messages]); // Removed status from dependencies to ensure it triggers during streaming + if (terminalInfoAnnotations.length > 0) { + // Always scroll to bottom when terminal info is updated, even during streaming + scrollTerminalToBottom(); + } + }, [messages]); // Removed status from dependencies to ensure it triggers during streaming - // Pure function to get connector sources for a specific message - const getMessageConnectorSources = (message: any): any[] => { - if (!message || message.role !== "assistant" || !message.annotations) - return []; + // Pure function to get connector sources for a specific message + const getMessageConnectorSources = (message: any): any[] => { + if (!message || message.role !== "assistant" || !message.annotations) + return []; - // Find all SOURCES annotations - const annotations = message.annotations as any[]; - const sourcesAnnotations = annotations.filter((a) => a.type === "SOURCES"); + // Find all SOURCES annotations + const annotations = message.annotations as any[]; + const sourcesAnnotations = annotations.filter((a) => a.type === "SOURCES"); - // Get the latest SOURCES annotation - if (sourcesAnnotations.length === 0) return []; - const latestSourcesAnnotation = - sourcesAnnotations[sourcesAnnotations.length - 1]; + // Get the latest SOURCES annotation + if (sourcesAnnotations.length === 0) return []; + const latestSourcesAnnotation = + sourcesAnnotations[sourcesAnnotations.length - 1]; - if (!latestSourcesAnnotation.content) return []; + if (!latestSourcesAnnotation.content) return []; - return latestSourcesAnnotation.content; - }; + return latestSourcesAnnotation.content; + }; - // Custom handleSubmit function to include selected connectors and answer type - const handleSubmit = (e: React.FormEvent) => { - e.preventDefault(); + // Custom handleSubmit function to include selected connectors and answer type + const handleSubmit = (e: React.FormEvent) => { + e.preventDefault(); - if (!input.trim() || status !== "ready") return; + if (!input.trim() || status !== "ready") return; - // Validation: require at least one connector OR at least one document - // Note: Fast LLM selection updates user preferences automatically - // if (selectedConnectors.length === 0 && selectedDocuments.length === 0) { - // alert("Please select at least one connector or document"); - // return; - // } + // Validation: require at least one connector OR at least one document + // Note: Fast LLM selection updates user preferences automatically + // if (selectedConnectors.length === 0 && selectedDocuments.length === 0) { + // alert("Please select at least one connector or document"); + // return; + // } - // Call the original handleSubmit from useChat - handleChatSubmit(e); - }; + // Call the original handleSubmit from useChat + handleChatSubmit(e); + }; - // Reference to the messages container for auto-scrolling - const messagesEndRef = useRef(null); + // Reference to the messages container for auto-scrolling + const messagesEndRef = useRef(null); - // Function to scroll to bottom - const scrollToBottom = () => { - messagesEndRef.current?.scrollIntoView({ 
behavior: "smooth" }); - }; + // Function to scroll to bottom + const scrollToBottom = () => { + messagesEndRef.current?.scrollIntoView({ behavior: "smooth" }); + }; - // Scroll to bottom when messages change - useEffect(() => { - scrollToBottom(); - }, [messages]); + // Scroll to bottom when messages change + useEffect(() => { + scrollToBottom(); + }, [messages]); - // Reset sources page when new messages arrive - useEffect(() => { - // Reset pagination when we get new messages - setSourcesPage(1); - setExpandedSources(false); - }, [messages]); + // Reset sources page when new messages arrive + useEffect(() => { + // Reset pagination when we get new messages + setSourcesPage(1); + setExpandedSources(false); + }, [messages]); - // Scroll terminal to bottom when expanded - useEffect(() => { - if (terminalExpanded) { - setTimeout(scrollTerminalToBottom, 300); // Wait for transition to complete - } - }, [terminalExpanded]); + // Scroll terminal to bottom when expanded + useEffect(() => { + if (terminalExpanded) { + setTimeout(scrollTerminalToBottom, 300); // Wait for transition to complete + } + }, [terminalExpanded]); - // Function to check scroll position and update indicators - const updateScrollIndicators = () => { - updateScrollIndicatorsUtil( - tabsListRef as React.RefObject, - setCanScrollLeft, - setCanScrollRight, - ); - }; + // Function to check scroll position and update indicators + const updateScrollIndicators = () => { + updateScrollIndicatorsUtil( + tabsListRef as React.RefObject, + setCanScrollLeft, + setCanScrollRight, + ); + }; - // Initialize scroll indicators - const updateIndicators = useScrollIndicators( - tabsListRef as React.RefObject, - setCanScrollLeft, - setCanScrollRight, - ); + // Initialize scroll indicators + const updateIndicators = useScrollIndicators( + tabsListRef as React.RefObject, + setCanScrollLeft, + setCanScrollRight, + ); - // Function to scroll tabs list left - const scrollTabsLeft = () => { - scrollTabsLeftUtil( - tabsListRef as React.RefObject, - updateIndicators, - ); - }; + // Function to scroll tabs list left + const scrollTabsLeft = () => { + scrollTabsLeftUtil( + tabsListRef as React.RefObject, + updateIndicators, + ); + }; - // Function to scroll tabs list right - const scrollTabsRight = () => { - scrollTabsRightUtil( - tabsListRef as React.RefObject, - updateIndicators, - ); - }; + // Function to scroll tabs list right + const scrollTabsRight = () => { + scrollTabsRightUtil( + tabsListRef as React.RefObject, + updateIndicators, + ); + }; - // Use the scroll to bottom hook - useScrollToBottom(messagesEndRef as React.RefObject, [ - messages, - ]); + // Use the scroll to bottom hook + useScrollToBottom(messagesEndRef as React.RefObject, [ + messages, + ]); - // Function to get a citation source by ID - const getCitationSource = React.useCallback( - (citationId: number, messageIndex?: number): Source | null => { - if (!messages || messages.length === 0) return null; + // Function to get a citation source by ID + const getCitationSource = React.useCallback( + (citationId: number, messageIndex?: number): Source | null => { + if (!messages || messages.length === 0) return null; - // If no specific message index is provided, use the latest assistant message - if (messageIndex === undefined) { - // Find the latest assistant message - const assistantMessages = messages.filter( - (msg) => msg.role === "assistant", - ); - if (assistantMessages.length === 0) return null; + // If no specific message index is provided, use the latest assistant message + if 
(messageIndex === undefined) { + // Find the latest assistant message + const assistantMessages = messages.filter( + (msg) => msg.role === "assistant", + ); + if (assistantMessages.length === 0) return null; - const latestAssistantMessage = - assistantMessages[assistantMessages.length - 1]; + const latestAssistantMessage = + assistantMessages[assistantMessages.length - 1]; - // Use our helper function to get sources - const sources = getMessageConnectorSources(latestAssistantMessage); - if (sources.length === 0) return null; + // Use our helper function to get sources + const sources = getMessageConnectorSources(latestAssistantMessage); + if (sources.length === 0) return null; - // Flatten all sources from all connectors - const allSources: Source[] = []; - sources.forEach((connector: ConnectorSource) => { - if (connector.sources && Array.isArray(connector.sources)) { - connector.sources.forEach((source: SourceItem) => { - allSources.push({ - id: source.id, - title: source.title, - description: source.description, - url: source.url, - connectorType: connector.type, - }); - }); - } - }); + // Flatten all sources from all connectors + const allSources: Source[] = []; + sources.forEach((connector: ConnectorSource) => { + if (connector.sources && Array.isArray(connector.sources)) { + connector.sources.forEach((source: SourceItem) => { + allSources.push({ + id: source.id, + title: source.title, + description: source.description, + url: source.url, + connectorType: connector.type, + }); + }); + } + }); - // Find the source with the matching ID - const foundSource = allSources.find( - (source) => source.id === citationId, - ); + // Find the source with the matching ID + const foundSource = allSources.find( + (source) => source.id === citationId, + ); - return foundSource || null; - } else { - // Use the specific message by index - const message = messages[messageIndex]; + return foundSource || null; + } else { + // Use the specific message by index + const message = messages[messageIndex]; - // Use our helper function to get sources - const sources = getMessageConnectorSources(message); - if (sources.length === 0) return null; + // Use our helper function to get sources + const sources = getMessageConnectorSources(message); + if (sources.length === 0) return null; - // Flatten all sources from all connectors - const allSources: Source[] = []; - sources.forEach((connector: ConnectorSource) => { - if (connector.sources && Array.isArray(connector.sources)) { - connector.sources.forEach((source: SourceItem) => { - allSources.push({ - id: source.id, - title: source.title, - description: source.description, - url: source.url, - connectorType: connector.type, - }); - }); - } - }); + // Flatten all sources from all connectors + const allSources: Source[] = []; + sources.forEach((connector: ConnectorSource) => { + if (connector.sources && Array.isArray(connector.sources)) { + connector.sources.forEach((source: SourceItem) => { + allSources.push({ + id: source.id, + title: source.title, + description: source.description, + url: source.url, + connectorType: connector.type, + }); + }); + } + }); - // Find the source with the matching ID - const foundSource = allSources.find( - (source) => source.id === citationId, - ); + // Find the source with the matching ID + const foundSource = allSources.find( + (source) => source.id === citationId, + ); - return foundSource || null; - } - }, - [messages], - ); + return foundSource || null; + } + }, + [messages], + ); - // Pure function for rendering terminal content - 
no hooks allowed here - const renderTerminalContent = (message: any) => { - if (!message.annotations) return null; + // Pure function for rendering terminal content - no hooks allowed here + const renderTerminalContent = (message: any) => { + if (!message.annotations) return null; - // Get all TERMINAL_INFO annotations content - const terminalInfoAnnotations = (message.annotations as any[]).map(item => { - if(item.type === "TERMINAL_INFO") { - return item.content.map((a: any) => a.text) - - } - }).flat().filter(Boolean) + // Get all TERMINAL_INFO annotations content + const terminalInfoAnnotations = (message.annotations as any[]) + .map((item) => { + if (item.type === "TERMINAL_INFO") { + return item.content.map((a: any) => a.text); + } + }) + .flat() + .filter(Boolean); - // Render the content of the latest TERMINAL_INFO annotation - return terminalInfoAnnotations.map((item: any, idx: number) => ( -
- - [{String(idx).padStart(2, "0")}: - {String(Math.floor(idx * 2)).padStart(2, "0")}] - - {">"} - ( +
+ + [{String(idx).padStart(2, "0")}: + {String(Math.floor(idx * 2)).padStart(2, "0")}] + + {">"} + - {item} - -
- )); - }; + > + {item} +
+
+ )); + }; - return ( - <> -
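`renderTerminalContent` above extracts TERMINAL_INFO texts with `map(...).flat().filter(Boolean)`, leaning on non-matching items mapping to `undefined`. `flatMap` with an empty-array fallback expresses the same extraction without the intermediate `undefined`s; a sketch over the annotation shape implied by the usage (the interface is inferred for illustration, not exported anywhere in this PR):

```ts
// Annotation shape as used above; inferred, not authoritative.
interface TerminalAnnotation {
  type: string;
  content?: { text: string }[];
}

// All TERMINAL_INFO texts, in order, with no undefined holes to filter out.
function terminalLines(annotations: TerminalAnnotation[]): string[] {
  return annotations.flatMap((a) =>
    a.type === "TERMINAL_INFO" && a.content ? a.content.map((c) => c.text) : [],
  );
}
```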
- {messages.length === 0 && ( -

- -
- Surf{""} -
-
- Sense -
-
-
-

- )} - {messages?.map((message, index) => { - if (message.role === "user") { - return ( -
- -
- - - getCitationSource(id, index)} - className="text-sm" - /> - - -
-
- ); - } + return ( + <> +
+ {messages.length === 0 && ( +

+ +
+ Surf{""} +
+
+ Sense +
+
+
+

+ )} + {messages?.map((message, index) => { + if (message.role === "user") { + return ( +
+ +
+ + + getCitationSource(id, index)} + className="text-sm" + /> + + +
+
+ ); + } - if (message.role === "assistant") { - return ( -
- - - - Answer - - - - {/* Status Messages Section */} - -
-
-
-
setTerminalExpanded(false)} - >
-
-
setTerminalExpanded(true)} - >
-
- - surfsense-research-terminal - -
-
+ if (message.role === "assistant") { + return ( +
+ + + + Answer + + + + {/* Status Messages Section */} + +
+
+
+
setTerminalExpanded(false)} + >
+
+
setTerminalExpanded(true)} + >
+
+ + surfsense-research-terminal + +
+
-
-
- Last login: {currentDate} {currentTime} -
-
- - researcher@surfsense - - : - ~/research - $ - surfsense-researcher -
+
+
+ Last login: {currentDate} {currentTime} +
+
+ + researcher@surfsense + + : + ~/research + $ + surfsense-researcher +
- {renderTerminalContent(message)} + {renderTerminalContent(message)} -
- - [00:13] - - - researcher@surfsense - - : - ~/research - $ -
-
+
+ + [00:13] + + + researcher@surfsense + + : + ~/research + $ +
+
- {/* Terminal scroll button */} -
- -
-
- + {/* Terminal scroll button */} +
+ +
+
+
- {/* Sources Section with Connector Tabs */} -
-
- - Sources -
+ {/* Sources Section with Connector Tabs */} +
+
+ + Sources +
- {(() => { - // Get sources for this specific message - const messageConnectorSources = - getMessageConnectorSources(message); + {(() => { + // Get sources for this specific message + const messageConnectorSources = + getMessageConnectorSources(message); - if (messageConnectorSources.length === 0) { - return ( -
- -
- ); - } + if (messageConnectorSources.length === 0) { + return ( +
+ +
+ ); + } - // Use these message-specific sources for the Tabs component - return ( - 0 - ? messageConnectorSources[0].type - : undefined - } - className="w-full" - > -
-
- + // Use these message-specific sources for the Tabs component + return ( + 0 + ? messageConnectorSources[0].type + : undefined + } + className="w-full" + > +
+
+ -
-
- - {messageConnectorSources.map( - (connector) => ( - - {getConnectorIcon(connector.type)} - - {connector.name.split(" ")[0]} - - - {connector.sources?.length || 0} - - - ), - )} - -
-
+
+
+ + {messageConnectorSources.map( + (connector) => ( + + {getConnectorIcon(connector.type)} + + {connector.name.split(" ")[0]} + + + {connector.sources?.length || 0} + + + ), + )} + +
+
- -
-
+ +
+
- {messageConnectorSources.map((connector) => ( - -
- {connector.sources - ?.slice(0, INITIAL_SOURCES_DISPLAY) - ?.map((source: any, index: number) => ( - -
-
- {getConnectorIcon(connector.type)} -
-
-

- {source.title} -

-

- {source.description} -

-
- -
-
- ))} + {messageConnectorSources.map((connector) => ( + +
+ {connector.sources + ?.slice(0, INITIAL_SOURCES_DISPLAY) + ?.map((source: any, index: number) => ( + +
+
+ {getConnectorIcon(connector.type)} +
+
+

+ {source.title} +

+

+ {source.description} +

+
+ +
+
+ ))} - {connector.sources?.length > - INITIAL_SOURCES_DISPLAY && ( - - setDialogOpenId( - open ? connector.id : null, - ) - } - > - - - - - - - - )} -
-
- ))} - - ); - })()} -
+ {connector.sources?.length > + INITIAL_SOURCES_DISPLAY && ( + + setDialogOpenId( + open ? connector.id : null, + ) + } + > + + + + + + + + )} +
+ + ))} + + ); + })()} +
- {/* Answer Section */} -
- { -
- {message.annotations && - (() => { - // Get all ANSWER annotations - const answerAnnotations = ( - message.annotations as any[] - ).filter((a) => a.type === "ANSWER"); + {/* Answer Section */} +
+ { +
+ {message.annotations && + (() => { + // Get all ANSWER annotations + const answerAnnotations = ( + message.annotations as any[] + ).filter((a) => a.type === "ANSWER"); - // Get the latest ANSWER annotation - const latestAnswer = - answerAnnotations.length > 0 - ? answerAnnotations[ - answerAnnotations.length - 1 - ] - : null; + // Get the latest ANSWER annotation + const latestAnswer = + answerAnnotations.length > 0 + ? answerAnnotations[ + answerAnnotations.length - 1 + ] + : null; - // If we have a latest ANSWER annotation with content, render it - if ( - latestAnswer?.content && - latestAnswer.content.length > 0 - ) { - return ( - - getCitationSource(id, index) - } - type="ai" - /> - ); - } + // If we have a latest ANSWER annotation with content, render it + if ( + latestAnswer?.content && + latestAnswer.content.length > 0 + ) { + return ( + + getCitationSource(id, index) + } + type="ai" + /> + ); + } - // Fallback to the message content if no ANSWER annotation is available - return getCitationSource(id, index)} - type="ai" - />; - })()} + // Fallback to the message content if no ANSWER annotation is available + return ( + + getCitationSource(id, index) + } + type="ai" + /> + ); + })()}
}
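The Answer block above and the Further Questions block below repeat the same move: filter `message.annotations` by a type tag and keep the last match. A hypothetical helper capturing that convention, typed loosely to match this page's `any[]` annotations:

```ts
// Most recent annotation of a given type, or null if none has streamed yet.
function latestAnnotation(annotations: any[] | undefined, type: string): any | null {
  if (!annotations) return null;
  const matches = annotations.filter((a) => a?.type === type);
  return matches.length > 0 ? matches[matches.length - 1] : null;
}

// e.g. latestAnnotation(message.annotations, "ANSWER")
//      latestAnnotation(message.annotations, "FURTHER_QUESTIONS")
```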
{/* Further Questions Section */} - {message.annotations && (() => { - // Get all FURTHER_QUESTIONS annotations - const furtherQuestionsAnnotations = (message.annotations as any[]) - .filter(a => a.type === 'FURTHER_QUESTIONS'); + {message.annotations && + (() => { + // Get all FURTHER_QUESTIONS annotations + const furtherQuestionsAnnotations = ( + message.annotations as any[] + ).filter((a) => a.type === "FURTHER_QUESTIONS"); - // Get the latest FURTHER_QUESTIONS annotation - const latestFurtherQuestions = furtherQuestionsAnnotations.length > 0 - ? furtherQuestionsAnnotations[furtherQuestionsAnnotations.length - 1] - : null; + // Get the latest FURTHER_QUESTIONS annotation + const latestFurtherQuestions = + furtherQuestionsAnnotations.length > 0 + ? furtherQuestionsAnnotations[ + furtherQuestionsAnnotations.length - 1 + ] + : null; - // Only render if we have questions - if (!latestFurtherQuestions?.content || latestFurtherQuestions.content.length === 0) { - return null; - } + // Only render if we have questions + if ( + !latestFurtherQuestions?.content || + latestFurtherQuestions.content.length === 0 + ) { + return null; + } - const furtherQuestions = latestFurtherQuestions.content; + const furtherQuestions = latestFurtherQuestions.content; - return ( -
- {/* Main container with improved styling */} -
- {/* Header with better visual separation */} -
-
-

- - - - Follow-up Questions -

- - {furtherQuestions.length} suggestion{furtherQuestions.length !== 1 ? 's' : ''} - + return ( +
+ {/* Main container with improved styling */} +
+ {/* Header with better visual separation */} +
+
+

+ + + + Follow-up Questions +

+ + {furtherQuestions.length} suggestion + {furtherQuestions.length !== 1 ? "s" : ""} + +
-
- {/* Questions container with enhanced scrolling */} -
-
- {/* Left fade gradient */} -
- - {/* Right fade gradient */} -
- - {/* Scrollable container */} -
-
- {furtherQuestions.map((question: any, qIndex: number) => ( - - ))} + {/* Questions container with enhanced scrolling */} +
+
+ {/* Left fade gradient */} +
+ + {/* Right fade gradient */} +
+ + {/* Scrollable container */} +
+
+ {furtherQuestions.map( + (question: any, qIndex: number) => ( + + ), + )} +
-
- ); - })()} + ); + })()} {/* Scroll to bottom button */}
- -
-
- {/* Enhanced Document Selection Dialog */} - - - {}} - documentsCount={documents?.length || 0} - /> - - - - -
- - Select Documents - - {selectedDocuments.length} selected - -
- -
- - Choose documents to include in your research context. Use - filters and search to find specific documents. - -
+ {/* New Chat Input Form */} +
+
+ + {/* Send button */} + +
+
+
+ {/* Enhanced Document Selection Dialog */} + + + {}} + documentsCount={documents?.length || 0} + /> + + + + +
+ + Select Documents + + {selectedDocuments.length} selected + +
+ +
+ + Choose documents to include in your research context. Use + filters and search to find specific documents. + +
- {/* Enhanced Search and Filter Controls */} -
-
- {/* Search Input */} -
- - setDocumentFilter(e.target.value)} - /> - {documentFilter && ( - - )} -
+ {/* Enhanced Search and Filter Controls */} +
+
+ {/* Search Input */} +
+ + setDocumentFilter(e.target.value)} + /> + {documentFilter && ( + + )} +
- {/* Document Type Filter */} - { - setDocumentTypeFilter(newType); - setDocumentsPage(1); // Reset to page 1 when filter changes - }} - counts={documentTypeCounts} - /> -
+ {/* Document Type Filter */} + { + setDocumentTypeFilter(newType); + setDocumentsPage(1); // Reset to page 1 when filter changes + }} + counts={documentTypeCounts} + /> +
- {/* Results Summary */} -
- - {isLoadingDocuments - ? "Loading documents..." - : `Showing ${paginatedDocuments.length} of ${filteredDocuments.length} documents`} - - {filteredDocuments.length > 0 && ( - - Page {documentsPage} of {totalPages} - - )} -
-
+ {/* Results Summary */} +
+ + {isLoadingDocuments + ? "Loading documents..." + : `Showing ${paginatedDocuments.length} of ${filteredDocuments.length} documents`} + + {filteredDocuments.length > 0 && ( + + Page {documentsPage} of {totalPages} + + )} +
+
- {/* Document List with Proper Scrolling */} -
-
- {isLoadingDocuments ? ( - // Enhanced skeleton loading - Array.from({ length: 6 }, (_, i) => ( - - )) - ) : documentsError ? ( -
-
- -
-

- Error loading documents -

-

- Please try refreshing the page -

-
- ) : filteredDocuments.length === 0 ? ( -
-
- -
-

- No documents found -

-

- {documentFilter || documentTypeFilter !== "ALL" - ? "Try adjusting your search or filters" - : "Upload documents to get started"} -

- {!documentFilter && documentTypeFilter === "ALL" && ( - - )} -
- ) : ( - // Enhanced document list - paginatedDocuments.map((document) => { - const isSelected = selectedDocuments.includes( - document.id, - ); - const typeLabel = document.document_type - .replace(/_/g, " ") - .toLowerCase(); + {/* Document List with Proper Scrolling */} +
+
+ {isLoadingDocuments ? ( + // Enhanced skeleton loading + Array.from({ length: 6 }, (_, i) => ( + + )) + ) : documentsError ? ( +
+
+ +
+

+ Error loading documents +

+

+ Please try refreshing the page +

+
+ ) : filteredDocuments.length === 0 ? ( +
+
+ +
+

+ No documents found +

+

+ {documentFilter || documentTypeFilter !== "ALL" + ? "Try adjusting your search or filters" + : "Upload documents to get started"} +

+ {!documentFilter && documentTypeFilter === "ALL" && ( + + )} +
+ ) : ( + // Enhanced document list + paginatedDocuments.map((document) => { + const isSelected = selectedDocuments.includes( + document.id, + ); + const typeLabel = document.document_type + .replace(/_/g, " ") + .toLowerCase(); - return ( -
handleDocumentToggle(document.id)} - > -
-
- {getConnectorIcon(document.document_type)} -
-
-
-
-

- {document.title} -

- {isSelected && ( -
-
- -
-
- )} -
-
- - {typeLabel} - - - {new Date( - document.created_at, - ).toLocaleDateString()} - -
-

- {document.content.substring(0, 200)}... -

-
-
- ); - }) - )} -
-
+ return ( +
handleDocumentToggle(document.id)} + > +
+
+ {getConnectorIcon(document.document_type)} +
+
+
+
+

+ {document.title} +

+ {isSelected && ( +
+
+ +
+
+ )} +
+
+ + {typeLabel} + + + {new Date( + document.created_at, + ).toLocaleDateString()} + +
+

+ {document.content.substring(0, 200)}... +

+
+
+ ); + }) + )} +
+
- {/* Enhanced Pagination Controls */} - {totalPages > 1 && ( -
-
- -
- {Array.from( - { length: Math.min(5, totalPages) }, - (_, i) => { - const page = - documentsPage <= 3 - ? i + 1 - : documentsPage - 2 + i; - if (page > totalPages) return null; - return ( - - ); - }, - )} - {totalPages > 5 && documentsPage < totalPages - 2 && ( - <> - - ... - - - - )} -
- -
-
- )} + {/* Enhanced Pagination Controls */} + {totalPages > 1 && ( +
+
+ +
+ {Array.from( + { length: Math.min(5, totalPages) }, + (_, i) => { + const page = + documentsPage <= 3 + ? i + 1 + : documentsPage - 2 + i; + if (page > totalPages) return null; + return ( + + ); + }, + )} + {totalPages > 5 && documentsPage < totalPages - 2 && ( + <> + + ... + + + + )} +
+ +
+
+ )} - {/* Enhanced Footer */} - -
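The numbered page buttons above come from a five-wide window that tracks the current page (`documentsPage <= 3 ? i + 1 : documentsPage - 2 + i`), dropping out-of-range pages. Isolated, the window logic reads as follows; a sketch mirroring the inline expression, with the trailing ellipsis left to the JSX as in the diff:

```ts
// Up to `width` 1-based page numbers centered on `current`, clipped to `total`.
function pageWindow(current: number, total: number, width = 5): number[] {
  const pages: number[] = [];
  for (let i = 0; i < Math.min(width, total); i++) {
    const page = current <= 3 ? i + 1 : current - 2 + i;
    if (page >= 1 && page <= total) pages.push(page);
  }
  return pages;
}

// pageWindow(7, 20) => [5, 6, 7, 8, 9]; pageWindow(1, 2) => [1, 2]
```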
- - {selectedDocuments.length} of {filteredDocuments.length}{" "} - document{selectedDocuments.length !== 1 ? "s" : ""}{" "} - selected - -
-
- - + - + -
-
-
-
+ if (allSelected) { + setSelectedDocuments((prev) => + prev.filter((id) => !allFilteredIds.includes(id)), + ); + } else { + setSelectedDocuments((prev) => [ + ...new Set([...prev, ...allFilteredIds]), + ]); + } + }} + disabled={filteredDocuments.length === 0} + > + {filteredDocuments.every((doc) => + selectedDocuments.includes(doc.id), + ) + ? "Deselect" + : "Select"}{" "} + All Filtered + +
+ + +
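The "Select All Filtered" handler above toggles symmetrically: when every filtered document is already selected it strips that subset out, otherwise it unions the subset in, deduplicating through a `Set`. The state transition on its own, assuming numeric document ids as this page uses:

```ts
// Toggle a filtered subset in or out of the current selection.
function toggleAllFiltered(selected: number[], filteredIds: number[]): number[] {
  const allSelected = filteredIds.every((id) => selected.includes(id));
  return allSelected
    ? selected.filter((id) => !filteredIds.includes(id)) // remove subset
    : [...new Set([...selected, ...filteredIds])]; // union, deduped
}
```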
-				{/* Connector Selection Dialog */}
-
-						{}}
-					/>
-
-
-						Select Connectors
-
-							Choose which data sources to include in your research
-
-
+				{/* Connector Selection Dialog */}
+
+						{}}
+					/>
+
+
+						Select Connectors
+
+							Choose which data sources to include in your research
+
+
-					{/* Connector selection grid */}
-
-						{isLoadingConnectors ? (
-
-
-						) : (
-							connectorSourceItems.map((connector) => {
-								const isSelected = selectedConnectors.includes(
-									connector.type,
-								);
+					{/* Connector selection grid */}
+
+						{isLoadingConnectors ? (
+
+
+						) : (
+							connectorSourceItems.map((connector) => {
+								const isSelected = selectedConnectors.includes(
+									connector.type,
+								);

-								return (
-										onClick={() => {
-											setSelectedConnectors(
-												isSelected
-													? selectedConnectors.filter(
-															(type) => type !== connector.type,
-														)
-													: [...selectedConnectors, connector.type],
-											);
-										}}
-										role="checkbox"
-										aria-checked={isSelected}
-										tabIndex={0}
-									>
-
-											{getConnectorIcon(connector.type)}
-
-
-										{connector.name}
-
-										{isSelected && (
-
-										)}
-
-								);
-							})
-						)}
-
+								return (
+										onClick={() => {
+											setSelectedConnectors(
+												isSelected
+													? selectedConnectors.filter(
+															(type) => type !== connector.type,
+														)
+													: [...selectedConnectors, connector.type],
+											);
+										}}
+										role="checkbox"
+										aria-checked={isSelected}
+										tabIndex={0}
+									>
+
+											{getConnectorIcon(connector.type)}
+
+
+										{connector.name}
+
+										{isSelected && (
+
+										)}
+
+								);
+							})
+						)}
+
-
-
-
-
-
+
+
+
+
+
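Each tile in the connector grid behaves as an accessible checkbox (`role="checkbox"`, `aria-checked`, `tabIndex={0}`) whose click toggles one connector type string in or out of the selection. The same toggle, extracted as a pure helper for clarity:

```tsx
// Pure form of the grid tile's onClick in the dialog above.
function toggleConnector(selectedConnectors: string[], connectorType: string): string[] {
	return selectedConnectors.includes(connectorType)
		? selectedConnectors.filter((type) => type !== connectorType)
		: [...selectedConnectors, connectorType];
}

// toggleConnector(["SLACK_CONNECTOR"], "JIRA_CONNECTOR")
//   -> ["SLACK_CONNECTOR", "JIRA_CONNECTOR"]
```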
-				{/* Search Mode Control */}
-
-
-
+				{/* Search Mode Control */}
+
+
+
-				{/* Research Mode Control */}
-
-
+				{/* Research Mode Control */}
+
+
-				{/* Fast LLM Selector */}
-
-
-
-
-
+				{/* Fast LLM Selector */}
+
+
+
+
+
-			{/* Reference for auto-scrolling */}
-
-
-		);
+			{/* Reference for auto-scrolling */}
+
+
+
-	);
 };
 
 export default ChatPage;
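Both the document cards and the connector grid above resolve their icons through `getConnectorIcon`, which the next file extends with a `JIRA_CONNECTOR` case (the new `IconTicket` import below is what it renders). A trimmed sketch of the pattern — the icon elements were lost in this diff's rendering, so the exact JSX here is a reconstruction, and the `Webhook` fallback is a guess:

```tsx
import { IconBrandSlack, IconTicket } from "@tabler/icons-react";
import { Webhook } from "lucide-react";

// Abbreviated version of getConnectorIcon; the real switch covers every
// connector type listed in the diff below.
const getConnectorIconSketch = (connectorType: string) => {
	const iconProps = { className: "h-4 w-4" };
	switch (connectorType) {
		case "SLACK_CONNECTOR":
			return <IconBrandSlack {...iconProps} />;
		case "JIRA_CONNECTOR": // new in this change
			return <IconTicket {...iconProps} />;
		default:
			return <Webhook {...iconProps} />;
	}
};
```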
diff --git a/surfsense_web/components/chat/ConnectorComponents.tsx b/surfsense_web/components/chat/ConnectorComponents.tsx
index 4d0aa11..d7c977b 100644
--- a/surfsense_web/components/chat/ConnectorComponents.tsx
+++ b/surfsense_web/components/chat/ConnectorComponents.tsx
@@ -1,6 +1,6 @@
-import React from 'react';
-import {
-	ChevronDown,
+import React from "react";
+import {
+	ChevronDown,
 	Plus,
 	Search,
 	Globe,
@@ -12,78 +12,99 @@ import {
 	Webhook,
 	MessageCircle,
 	FileText,
-} from 'lucide-react';
-import { IconBrandNotion, IconBrandSlack, IconBrandYoutube, IconBrandGithub, IconLayoutKanban, IconLinkPlus, IconBrandDiscord } from "@tabler/icons-react";
-import { Button } from '@/components/ui/button';
-import { Connector, ResearchMode } from './types';
+} from "lucide-react";
+import {
+	IconBrandNotion,
+	IconBrandSlack,
+	IconBrandYoutube,
+	IconBrandGithub,
+	IconLayoutKanban,
+	IconLinkPlus,
+	IconBrandDiscord,
+	IconTicket,
+} from "@tabler/icons-react";
+import { Button } from "@/components/ui/button";
+import { Connector, ResearchMode } from "./types";
 
 // Helper function to get connector icon
 export const getConnectorIcon = (connectorType: string) => {
 	const iconProps = { className: "h-4 w-4" };
-
-	switch(connectorType) {
-		case 'LINKUP_API':
+
+	switch (connectorType) {
+		case "LINKUP_API":
 			return ;
-		case 'LINEAR_CONNECTOR':
+		case "LINEAR_CONNECTOR":
 			return ;
-		case 'GITHUB_CONNECTOR':
+		case "GITHUB_CONNECTOR":
 			return ;
-		case 'YOUTUBE_VIDEO':
+		case "YOUTUBE_VIDEO":
 			return ;
-		case 'CRAWLED_URL':
+		case "CRAWLED_URL":
 			return ;
-		case 'FILE':
-			return ;
-		case 'EXTENSION':
-			return ;
-		case 'SERPER_API':
-		case 'TAVILY_API':
+		case "FILE":
+			return ;
+		case "EXTENSION":
+			return ;
+		case "SERPER_API":
+		case "TAVILY_API":
 			return ;
-		case 'SLACK_CONNECTOR':
+		case "SLACK_CONNECTOR":
 			return ;
-		case 'NOTION_CONNECTOR':
+		case "NOTION_CONNECTOR":
 			return ;
-		case 'DISCORD_CONNECTOR':
+		case "DISCORD_CONNECTOR":
 			return ;
-		case 'DEEP':
+		case "JIRA_CONNECTOR":
+			return ;
+		case "DEEP":
 			return ;
-		case 'DEEPER':
+		case "DEEPER":
 			return ;
-		case 'DEEPEST':
+		case "DEEPEST":
 			return ;
 		default:
 			return ;
 	}
 };
 
-export const researcherOptions: { value: ResearchMode; label: string; icon: React.ReactNode }[] = [
+export const researcherOptions: {
+	value: ResearchMode;
+	label: string;
+	icon: React.ReactNode;
+}[] = [
 	{
-		value: 'QNA',
-		label: 'Q/A',
-		icon: getConnectorIcon('GENERAL')
+		value: "QNA",
+		label: "Q/A",
+		icon: getConnectorIcon("GENERAL"),
 	},
 	{
-		value: 'REPORT_GENERAL',
-		label: 'General',
-		icon: getConnectorIcon('GENERAL')
+		value: "REPORT_GENERAL",
+		label: "General",
+		icon: getConnectorIcon("GENERAL"),
 	},
 	{
-		value: 'REPORT_DEEP',
-		label: 'Deep',
-		icon: getConnectorIcon('DEEP')
+		value: "REPORT_DEEP",
+		label: "Deep",
+		icon: getConnectorIcon("DEEP"),
 	},
 	{
-		value: 'REPORT_DEEPER',
-		label: 'Deeper',
-		icon: getConnectorIcon('DEEPER')
+		value: "REPORT_DEEPER",
+		label: "Deeper",
+		icon: getConnectorIcon("DEEPER"),
 	},
-]
+];
 
 /**
  * Displays a small icon for a connector type
  */
-export const ConnectorIcon = ({ type, index = 0 }: { type: string; index?: number }) => (
-
+export const ConnectorIcon = ({
+	type,
+	index = 0,
+}: {
+	type: string;
+	index?: number;
+}) => (
+
@@ -109,24 +130,30 @@ type ConnectorButtonProps = {
 /**
  * Button that displays selected connectors and opens connector selection dialog
  */
-export const ConnectorButton = ({ selectedConnectors, onClick, connectorSources }: ConnectorButtonProps) => {
+export const ConnectorButton = ({
+	selectedConnectors,
+	onClick,
+	connectorSources,
+}: ConnectorButtonProps) => {
 	const totalConnectors = connectorSources.length;
 	const selectedCount = selectedConnectors.length;
 	const progressPercentage = (selectedCount / totalConnectors) * 100;
-
+
 	// Get the name of a single selected connector
 	const getSingleConnectorName = () => {
-		const connector = connectorSources.find(c => c.type === selectedConnectors[0]);
-		return connector?.name || '';
+		const connector = connectorSources.find(
+			(c) => c.type === selectedConnectors[0],
+		);
+		return connector?.name || "";
 	};
-
+
 	// Get display text based on selection count
 	const getDisplayText = () => {
 		if (selectedCount === totalConnectors) return "All Connectors";
 		if (selectedCount === 1) return getSingleConnectorName();
 		return `${selectedCount} Connectors`;
 	};
-
+
 	// Render the empty state (no connectors selected)
 	const renderEmptyState = () => (
 		<>
@@ -134,7 +161,7 @@ export const ConnectorButton = ({ selectedConnectors, onClick, connectorSources
 			Select Connectors
 		</>
 	);
-
+
 	// Render the selected connectors preview
 	const renderSelectedConnectors = () => (
 		<>
@@ -143,32 +170,36 @@ export const ConnectorButton = ({ selectedConnectors, onClick, connectorSources
 			{selectedConnectors.slice(0, 3).map((type, index) => (
 
 			))}
-
+
 			{/* Show count indicator if more than 3 connectors are selected */}
 			{selectedCount > 3 && }
-
+
 			{/* Display text */}
 			{getDisplayText()}
 		</>
 	);
-
+
 	return (
 
 	);
-};
\ No newline at end of file
+};
diff --git a/surfsense_web/lib/connectors/utils.ts b/surfsense_web/lib/connectors/utils.ts
index 022459b..b53ffee 100644
--- a/surfsense_web/lib/connectors/utils.ts
+++ b/surfsense_web/lib/connectors/utils.ts
@@ -1,14 +1,15 @@
 // Helper function to get connector type display name
 export const getConnectorTypeDisplay = (type: string): string => {
-	const typeMap: Record<string, string> = {
-		"SERPER_API": "Serper API",
-		"TAVILY_API": "Tavily API",
-		"SLACK_CONNECTOR": "Slack",
-		"NOTION_CONNECTOR": "Notion",
-		"GITHUB_CONNECTOR": "GitHub",
-		"LINEAR_CONNECTOR": "Linear",
-		"DISCORD_CONNECTOR": "Discord",
-		"LINKUP_API": "Linkup",
-	};
-	return typeMap[type] || type;
-};
+	const typeMap: Record<string, string> = {
+		SERPER_API: "Serper API",
+		TAVILY_API: "Tavily API",
+		SLACK_CONNECTOR: "Slack",
+		NOTION_CONNECTOR: "Notion",
+		GITHUB_CONNECTOR: "GitHub",
+		LINEAR_CONNECTOR: "Linear",
+		JIRA_CONNECTOR: "Jira",
+		DISCORD_CONNECTOR: "Discord",
+		LINKUP_API: "Linkup",
+	};
+	return typeMap[type] || type;
+};
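With the `JIRA_CONNECTOR` entry in place, display names resolve consistently across the UI, and unknown types fall back to the raw type string:

```tsx
import { getConnectorTypeDisplay } from "@/lib/connectors/utils";

getConnectorTypeDisplay("JIRA_CONNECTOR"); // "Jira"
getConnectorTypeDisplay("LINEAR_CONNECTOR"); // "Linear"
getConnectorTypeDisplay("SOME_FUTURE_TYPE"); // "SOME_FUTURE_TYPE" (fallback)
```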