diff --git a/.gitignore b/.gitignore
index ac12668..b67a7dd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
-.flashrank_cache*
\ No newline at end of file
+.flashrank_cache*
+podcasts/*
diff --git a/surfsense_backend/.env.example b/surfsense_backend/.env.example
index 6dfcc99..8e834bf 100644
--- a/surfsense_backend/.env.example
+++ b/surfsense_backend/.env.example
@@ -15,6 +15,9 @@ FAST_LLM="openai/gpt-4o-mini"
STRATEGIC_LLM="openai/gpt-4o"
LONG_CONTEXT_LLM="gemini/gemini-2.0-flash"
+# LiteLLM TTS provider: https://docs.litellm.ai/docs/text_to_speech#supported-providers
+TTS_SERVICE="openai/tts-1"
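+# Any LiteLLM-supported TTS model string should work here, e.g. (assumption):
+# TTS_SERVICE="openai/tts-1-hd"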
+
# Chosen LiteLLM Providers Keys
OPENAI_API_KEY="sk-proj-iA"
GEMINI_API_KEY="AIzaSyB6-1641124124124124124124124124124"
diff --git a/surfsense_backend/app/agents/podcaster/__init__.py b/surfsense_backend/app/agents/podcaster/__init__.py
new file mode 100644
index 0000000..8459b29
--- /dev/null
+++ b/surfsense_backend/app/agents/podcaster/__init__.py
@@ -0,0 +1,8 @@
+"""New LangGraph Agent.
+
+This module defines a custom graph.
+"""
+
+from .graph import graph
+
+__all__ = ["graph"]
diff --git a/surfsense_backend/app/agents/podcaster/configuration.py b/surfsense_backend/app/agents/podcaster/configuration.py
new file mode 100644
index 0000000..6bbb4ce
--- /dev/null
+++ b/surfsense_backend/app/agents/podcaster/configuration.py
@@ -0,0 +1,28 @@
+"""Define the configurable parameters for the agent."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, fields
+from typing import Optional
+
+from langchain_core.runnables import RunnableConfig
+
+
+@dataclass(kw_only=True)
+class Configuration:
+ """The configuration for the agent."""
+
+    # These values can be pre-set when you create assistants
+    # (https://langchain-ai.github.io/langgraph/cloud/how-tos/configuration_cloud/)
+    # and overridden when you invoke the graph.
+ podcast_title: str
+
+ @classmethod
+ def from_runnable_config(
+ cls, config: Optional[RunnableConfig] = None
+ ) -> Configuration:
+ """Create a Configuration instance from a RunnableConfig object."""
+ configurable = (config.get("configurable") or {}) if config else {}
+ _fields = {f.name for f in fields(cls) if f.init}
+ return cls(**{k: v for k, v in configurable.items() if k in _fields})
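+
+# Usage sketch (illustrative title): `from_runnable_config` reads the
+# "configurable" mapping and keeps only known dataclass fields, e.g.:
+#   cfg = Configuration.from_runnable_config(
+#       {"configurable": {"podcast_title": "Surfsense"}}
+#   )
+#   cfg.podcast_title  # -> "Surfsense"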
diff --git a/surfsense_backend/app/agents/podcaster/graph.py b/surfsense_backend/app/agents/podcaster/graph.py
new file mode 100644
index 0000000..f4604a7
--- /dev/null
+++ b/surfsense_backend/app/agents/podcaster/graph.py
@@ -0,0 +1,23 @@
+from langgraph.graph import StateGraph
+
+from .configuration import Configuration
+from .nodes import create_merged_podcast_audio, create_podcast_transcript
+from .state import State
+
+# Define a new graph
+workflow = StateGraph(State, config_schema=Configuration)
+
+# Add the nodes to the graph
+workflow.add_node("create_podcast_transcript", create_podcast_transcript)
+workflow.add_node("create_merged_podcast_audio", create_merged_podcast_audio)
+
+# Set the entrypoint to `create_podcast_transcript`
+workflow.add_edge("__start__", "create_podcast_transcript")
+workflow.add_edge("create_podcast_transcript", "create_merged_podcast_audio")
+workflow.add_edge("create_merged_podcast_audio", "__end__")
+
+# Compile the workflow into an executable graph
+graph = workflow.compile()
+graph.name = "Surfsense Podcaster" # This defines the custom name in LangSmith
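+
+# Usage sketch (illustrative values; run inside an async context):
+#   result = await graph.ainvoke(
+#       {"source_content": "...text to turn into a podcast..."},
+#       config={"configurable": {"podcast_title": "Surfsense"}},
+#   )
+#   result["final_podcast_file_path"]  # e.g. "podcasts/<uuid>_podcast.mp3"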
diff --git a/surfsense_backend/app/agents/podcaster/nodes.py b/surfsense_backend/app/agents/podcaster/nodes.py
new file mode 100644
index 0000000..810307e
--- /dev/null
+++ b/surfsense_backend/app/agents/podcaster/nodes.py
@@ -0,0 +1,197 @@
+from typing import Any, Dict
+import json
+import os
+import uuid
+from pathlib import Path
+import asyncio
+
+from langchain_core.messages import HumanMessage, SystemMessage
+from langchain_core.runnables import RunnableConfig
+from litellm import aspeech
+from ffmpeg.asyncio import FFmpeg
+
+from .configuration import Configuration
+from .state import PodcastTranscriptEntry, State, PodcastTranscripts
+from .prompts import get_podcast_generation_prompt
+from app.config import config as app_config
+
+
+async def create_podcast_transcript(state: State, config: RunnableConfig) -> Dict[str, Any]:
+    """Generate a two-speaker podcast transcript from the source content."""
+
+ # Initialize LLM
+ llm = app_config.long_context_llm_instance
+
+ # Get the prompt
+ prompt = get_podcast_generation_prompt()
+
+ # Create the messages
+ messages = [
+ SystemMessage(content=prompt),
+ HumanMessage(content=state.source_content)
+ ]
+
+ # Generate the podcast transcript
+ llm_response = await llm.ainvoke(messages)
+
+ # First try the direct approach
+ try:
+ podcast_transcript = PodcastTranscripts.model_validate(json.loads(llm_response.content))
+ except (json.JSONDecodeError, ValueError) as e:
+ print(f"Direct JSON parsing failed, trying fallback approach: {str(e)}")
+
+ # Fallback: Parse the JSON response manually
+ try:
+ # Extract JSON content from the response
+ content = llm_response.content
+
+ # Find the JSON in the content (handle case where LLM might add additional text)
+ json_start = content.find('{')
+ json_end = content.rfind('}') + 1
+ if json_start >= 0 and json_end > json_start:
+ json_str = content[json_start:json_end]
+
+ # Parse the JSON string
+ parsed_data = json.loads(json_str)
+
+ # Convert to Pydantic model
+ podcast_transcript = PodcastTranscripts.model_validate(parsed_data)
+
+                print("Successfully parsed podcast transcript using fallback approach")
+ else:
+ # If JSON structure not found, raise a clear error
+ error_message = f"Could not find valid JSON in LLM response. Raw response: {content}"
+ print(error_message)
+ raise ValueError(error_message)
+
+ except (json.JSONDecodeError, ValueError) as e2:
+ # Log the error and re-raise it
+ error_message = f"Error parsing LLM response (fallback also failed): {str(e2)}"
+ print(f"Error parsing LLM response: {str(e2)}")
+ print(f"Raw response: {llm_response.content}")
+ raise
+
+ return {
+ "podcast_transcript": podcast_transcript.podcast_transcripts
+ }
+
+
+async def create_merged_podcast_audio(state: State, config: RunnableConfig) -> Dict[str, Any]:
+ """Generate audio for each transcript and merge them into a single podcast file."""
+
+ configuration = Configuration.from_runnable_config(config)
+
+ starting_transcript = PodcastTranscriptEntry(
+ speaker_id=1,
+ dialog=f"Welcome to {configuration.podcast_title} Podcast."
+ )
+
+ transcript = state.podcast_transcript
+
+ # Merge the starting transcript with the podcast transcript
+ # Check if transcript is a PodcastTranscripts object or already a list
+ if hasattr(transcript, 'podcast_transcripts'):
+ transcript_entries = transcript.podcast_transcripts
+ else:
+ transcript_entries = transcript
+
+ merged_transcript = [starting_transcript] + transcript_entries
+
+ # Create a temporary directory for audio files
+ temp_dir = Path("temp_audio")
+ temp_dir.mkdir(exist_ok=True)
+
+ # Generate a unique session ID for this podcast
+ session_id = str(uuid.uuid4())
+ output_path = f"podcasts/{session_id}_podcast.mp3"
+ os.makedirs("podcasts", exist_ok=True)
+
+ # Map of speaker_id to voice
+ voice_mapping = {
+        0: "alloy",  # Speaker 0 (lead host)
+        1: "echo",   # Speaker 1 (also voices the intro line, which uses speaker_id=1)
+ # 2: "fable", # Second speaker
+ # 3: "onyx", # Third speaker
+ # 4: "nova", # Fourth speaker
+ # 5: "shimmer" # Fifth speaker
+ }
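+    # Note: these voice names are OpenAI TTS voices; other LiteLLM TTS
+    # providers may expect different voice identifiers.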
+
+ # Generate audio for each transcript segment
+ audio_files = []
+
+ async def generate_speech_for_segment(segment, index):
+ # Handle both dictionary and PodcastTranscriptEntry objects
+ if hasattr(segment, 'speaker_id'):
+ speaker_id = segment.speaker_id
+ dialog = segment.dialog
+ else:
+ speaker_id = segment.get("speaker_id", 0)
+ dialog = segment.get("dialog", "")
+
+ # Select voice based on speaker_id
+ voice = voice_mapping.get(speaker_id, "alloy")
+
+ # Generate a unique filename for this segment
+ filename = f"{temp_dir}/{session_id}_{index}.mp3"
+
+ try:
+ # Generate speech using litellm
+ response = await aspeech(
+ model=app_config.TTS_SERVICE,
+ voice=voice,
+ input=dialog,
+ max_retries=2,
+ timeout=600,
+ )
+
+            # Write the audio bytes to the segment file
+ with open(filename, 'wb') as f:
+ f.write(response.content)
+
+ return filename
+ except Exception as e:
+ print(f"Error generating speech for segment {index}: {str(e)}")
+ raise
+
+ # Generate all audio files concurrently
+ tasks = [generate_speech_for_segment(segment, i) for i, segment in enumerate(merged_transcript)]
+ audio_files = await asyncio.gather(*tasks)
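+    # asyncio.gather preserves argument order, so audio_files stays aligned
+    # with merged_transcript even though segments are generated concurrently.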
+
+ # Merge audio files using ffmpeg
+ try:
+        # Create an FFmpeg instance (the "y" option overwrites existing output)
+ ffmpeg = FFmpeg().option("y")
+
+ # Add each audio file as input
+ for audio_file in audio_files:
+ ffmpeg = ffmpeg.input(audio_file)
+
+ # Configure the concatenation and output
+ filter_complex = []
+ for i in range(len(audio_files)):
+ filter_complex.append(f"[{i}:0]")
+
+ filter_complex_str = "".join(filter_complex) + f"concat=n={len(audio_files)}:v=0:a=1[outa]"
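+        # e.g. with three segments: [0:0][1:0][2:0]concat=n=3:v=0:a=1[outa]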
+ ffmpeg = ffmpeg.option("filter_complex", filter_complex_str)
+ ffmpeg = ffmpeg.output(output_path, map="[outa]")
+
+ # Execute FFmpeg
+ await ffmpeg.execute()
+
+ print(f"Successfully created podcast audio: {output_path}")
+
+ except Exception as e:
+ print(f"Error merging audio files: {str(e)}")
+ raise
+ finally:
+ # Clean up temporary files
+ for audio_file in audio_files:
+ try:
+ os.remove(audio_file)
+            except OSError:
+                # Best-effort cleanup; a leftover temp file is not fatal
+                pass
+
+ return {
+ "podcast_transcript": merged_transcript,
+ "final_podcast_file_path": output_path
+ }
diff --git a/surfsense_backend/app/agents/podcaster/prompts.py b/surfsense_backend/app/agents/podcaster/prompts.py
new file mode 100644
index 0000000..2b4bdcf
--- /dev/null
+++ b/surfsense_backend/app/agents/podcaster/prompts.py
@@ -0,0 +1,111 @@
+import datetime
+
+
+def get_podcast_generation_prompt():
+ return f"""
+Today's date: {datetime.datetime.now().strftime("%Y-%m-%d")}
+
+You are a master podcast scriptwriter, adept at transforming diverse input content into a lively, engaging, and natural-sounding conversation between two distinct podcast hosts. Your primary objective is to craft authentic, flowing dialogue that captures the spontaneity and chemistry of a real podcast discussion, completely avoiding any hint of robotic scripting or stiff formality. Think dynamic interplay, not just information delivery.
+
+Input:
+- 'source_content': A block of text containing the information to be discussed in the podcast. This could be research findings, an article summary, a detailed outline, user chat history related to the topic, or any other relevant raw information. The content might be unstructured but serves as the factual basis for the podcast dialogue.
+
+Output format:
+A JSON object containing the podcast transcript with alternating speakers:
+{{
+ "podcast_transcripts": [
+ {{
+ "speaker_id": 0,
+ "dialog": "Speaker 0 dialog here"
+ }},
+ {{
+ "speaker_id": 1,
+ "dialog": "Speaker 1 dialog here"
+ }},
+ {{
+ "speaker_id": 0,
+ "dialog": "Speaker 0 dialog here"
+ }},
+ {{
+ "speaker_id": 1,
+ "dialog": "Speaker 1 dialog here"
+ }}
+ ]
+}}
+
+Guidelines:
+
+1. **Establish Distinct & Consistent Host Personas:**
+ * **Speaker 0 (Lead Host):** Drives the conversation forward, introduces segments, poses key questions derived from the source content, and often summarizes takeaways. Maintain a guiding, clear, and engaging tone.
+ * **Speaker 1 (Co-Host/Expert):** Offers deeper insights, provides alternative viewpoints or elaborations on the source content, asks clarifying or challenging questions, and shares relevant anecdotes or examples. Adopt a complementary tone (e.g., analytical, enthusiastic, reflective, slightly skeptical).
+ * **Consistency is Key:** Ensure each speaker maintains their distinct voice, vocabulary choice, sentence structure, and perspective throughout the entire script. Avoid having them sound interchangeable. Their interaction should feel like a genuine partnership.
+
+2. **Craft Natural & Dynamic Dialogue:**
+ * **Emulate Real Conversation:** Use contractions (e.g., "don't", "it's"), interjections ("Oh!", "Wow!", "Hmm"), discourse markers ("you know", "right?", "well"), and occasional natural pauses or filler words. Avoid overly formal language or complex sentence structures typical of written text.
+ * **Foster Interaction & Chemistry:** Write dialogue where speakers genuinely react *to each other*. They should build on points ("Exactly, and that reminds me..."), ask follow-up questions ("Could you expand on that?"), express agreement/disagreement respectfully ("That's a fair point, but have you considered...?"), and show active listening.
+ * **Vary Rhythm & Pace:** Mix short, punchy lines with longer, more explanatory ones. Vary sentence beginnings. Use questions to break up exposition. The rhythm should feel spontaneous, not monotonous.
+ * **Inject Personality & Relatability:** Allow for appropriate humor, moments of surprise or curiosity, brief personal reflections ("I actually experienced something similar..."), or relatable asides that fit the hosts' personas and the topic. Lightly reference past discussions if it enhances context ("Remember last week when we touched on...?").
+
+3. **Structure for Flow and Listener Engagement:**
+ * **Natural Beginning:** Start with dialogue that flows naturally after an introduction (which will be added manually). Avoid redundant greetings or podcast name mentions since these will be added separately.
+ * **Logical Progression & Signposting:** Guide the listener through the information smoothly. Use clear transitions to link different ideas or segments ("So, now that we've covered X, let's dive into Y...", "That actually brings me to another key finding..."). Ensure topics flow logically from one to the next.
+ * **Meaningful Conclusion:** Summarize the key takeaways or main points discussed, reinforcing the core message derived from the source content. End with a final thought, a lingering question for the audience, or a brief teaser for what's next, providing a sense of closure. Avoid abrupt endings.
+
+4. **Integrate Source Content Seamlessly & Accurately:**
+   * **Translate, Don't Recite:** Rephrase information from the 'source_content' input into conversational language suitable for each host's persona. Avoid directly copying dense sentences or technical jargon without explanation. The goal is discussion, not narration.
+ * **Explain & Contextualize:** Use analogies, simple examples, storytelling, or have one host ask clarifying questions (acting as a listener surrogate) to break down complex ideas from the source.
+ * **Weave Information Naturally:** Integrate facts, data, or key points from the source *within* the dialogue, not as standalone, undigested blocks. Attribute information conversationally where appropriate ("The research mentioned...", "Apparently, the key factor is...").
+ * **Balance Depth & Accessibility:** Ensure the conversation is informative and factually accurate based on the source content, but prioritize clear communication and engaging delivery over exhaustive technical detail. Make it understandable and interesting for a general audience.
+
+5. **Length & Pacing:**
+   * **Six-Minute Duration:** Create a transcript that, when read at a natural speaking pace, would result in approximately 6 minutes of audio. At an average speaking rate of 150 words per minute, that is roughly 900 words total.
+ * **Concise Speaking Turns:** Keep most speaking turns relatively brief and focused. Aim for a natural back-and-forth rhythm rather than extended monologues.
+ * **Essential Content Only:** Prioritize the most important information from the source content. Focus on quality over quantity, ensuring every line contributes meaningfully to the topic.
+
+
+Example:
+
+Input: "Quantum computing uses quantum bits or qubits which can exist in multiple states simultaneously due to superposition."
+
+Output:
+{{
+ "podcast_transcripts": [
+ {{
+ "speaker_id": 0,
+ "dialog": "Today we're diving into the mind-bending world of quantum computing. You know, this is a topic I've been excited to cover for weeks."
+ }},
+ {{
+ "speaker_id": 1,
+ "dialog": "Same here! And I know our listeners have been asking for it. But I have to admit, the concept of quantum computing makes my head spin a little. Can we start with the basics?"
+ }},
+ {{
+ "speaker_id": 0,
+ "dialog": "Absolutely. So regular computers use bits, right? Little on-off switches that are either 1 or 0. But quantum computers use something called qubits, and this is where it gets fascinating."
+ }},
+ {{
+ "speaker_id": 1,
+ "dialog": "Wait, what makes qubits so special compared to regular bits?"
+ }},
+ {{
+ "speaker_id": 0,
+ "dialog": "The magic is in something called superposition. These qubits can exist in multiple states at the same time, not just 1 or 0."
+ }},
+ {{
+ "speaker_id": 1,
+ "dialog": "That sounds impossible! How would you even picture that?"
+ }},
+ {{
+ "speaker_id": 0,
+ "dialog": "Think of it like a coin spinning in the air. Before it lands, is it heads or tails?"
+ }},
+ {{
+ "speaker_id": 1,
+ "dialog": "Well, it's... neither? Or I guess both, until it lands? Oh, I think I see where you're going with this."
+ }}
+ ]
+}}
+
+
+Transform the source material into a lively and engaging podcast conversation. Craft dialogue that showcases authentic host chemistry and natural interaction (including occasional disagreement, building on points, or asking follow-up questions). Use varied speech patterns reflecting real human conversation, ensuring the final script effectively educates *and* entertains the listener while keeping within the 6-minute audio duration.
+
+"""
\ No newline at end of file
diff --git a/surfsense_backend/app/agents/podcaster/state.py b/surfsense_backend/app/agents/podcaster/state.py
new file mode 100644
index 0000000..d77270d
--- /dev/null
+++ b/surfsense_backend/app/agents/podcaster/state.py
@@ -0,0 +1,38 @@
+"""Define the state structures for the agent."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import List, Optional
+from pydantic import BaseModel, Field
+
+
+class PodcastTranscriptEntry(BaseModel):
+ """
+ Represents a single entry in a podcast transcript.
+ """
+ speaker_id: int = Field(..., description="The ID of the speaker (0 or 1)")
+ dialog: str = Field(..., description="The dialog text spoken by the speaker")
+
+
+class PodcastTranscripts(BaseModel):
+ """
+ Represents the full podcast transcript structure.
+ """
+ podcast_transcripts: List[PodcastTranscriptEntry] = Field(
+ ...,
+ description="List of transcript entries with alternating speakers"
+ )
+
+
+@dataclass
+class State:
+ """Defines the input state for the agent, representing a narrower interface to the outside world.
+
+ This class is used to define the initial state and structure of incoming data.
+ See: https://langchain-ai.github.io/langgraph/concepts/low_level/#state
+ for more information.
+ """
+
+ source_content: str
+ podcast_transcript: Optional[List[PodcastTranscriptEntry]] = None
+ final_podcast_file_path: Optional[str] = None
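+
+# Example initial state (illustrative content):
+#   State(source_content="Quantum computing uses qubits...")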
diff --git a/surfsense_backend/app/agents/podcaster/test_podcaster.py b/surfsense_backend/app/agents/podcaster/test_podcaster.py
new file mode 100644
index 0000000..df6728c
--- /dev/null
+++ b/surfsense_backend/app/agents/podcaster/test_podcaster.py
@@ -0,0 +1,474 @@
+#!/usr/bin/env python
+"""
+Test script for the Surfsense Podcaster agent.
+Run this directly from VS Code to test the Podcaster agent.
+"""
+
+import asyncio
+import os
+import sys
+from pathlib import Path
+
+# Add the project root to the Python path
+project_root = str(Path(__file__).resolve().parent.parent.parent.parent)
+if project_root not in sys.path:
+ sys.path.insert(0, project_root)
+
+from langchain_core.runnables import RunnableConfig
+
+# Now import modules using absolute imports
+from app.agents.podcaster.graph import graph
+from app.agents.podcaster.state import State
+
+
+async def test_podcaster_agent():
+ """Test the Podcaster agent with a sample input."""
+
+ # Print banner
+ print("=" * 80)
+ print("SURFSENSE PODCASTER AGENT TEST")
+ print("=" * 80)
+
+ # Sample input for testing
+ sample_source_content = """
+
+ Deep-Live-Cam
+
+
+ Real-time face swap and video deepfake with a single click and only a single image.
+