mastra object structure updates (#881)

This commit is contained in:
sreedharsreeram 2026-04-24 23:47:46 +00:00
parent c76d27f284
commit d7ee078ae9
7 changed files with 559 additions and 459 deletions

View file

@ -34,11 +34,10 @@ const agent = new Agent(withSupermemory(
model: openai("gpt-4o"),
instructions: "You are a helpful assistant.",
},
"user-123", // containerTag - scopes memories to this user
{
containerTag: "user-123", // Required: scopes memories to this user
customId: "conv-456", // Required: groups messages for contextual memory
mode: "full",
addMemory: "always",
threadId: "conv-456",
}
))
@ -46,15 +45,15 @@ const response = await agent.generate("What do you know about me?")
```
<Note>
**Memory saving is disabled by default.** The wrapper only retrieves existing memories. To automatically save conversations:
**Memory saving is enabled by default.** Conversations are automatically saved to Supermemory. To disable saving:
```typescript
const agent = new Agent(withSupermemory(
{ id: "my-assistant", model: openai("gpt-4o"), ... },
"user-123",
{
addMemory: "always",
threadId: "conv-456" // Required for conversation grouping
containerTag: "user-123",
customId: "conv-456",
addMemory: "never", // Disable automatic conversation saving
}
))
```
@ -96,11 +95,12 @@ sequenceDiagram
| Option | Type | Default | Description |
|--------|------|---------|-------------|
| `containerTag` | `string` | **Required** | User/container tag for scoping memories |
| `customId` | `string` | **Required** | Groups messages into a single document for contextual memory |
| `apiKey` | `string` | `SUPERMEMORY_API_KEY` env | Your Supermemory API key |
| `baseUrl` | `string` | `https://api.supermemory.ai` | Custom API endpoint |
| `mode` | `"profile" \| "query" \| "full"` | `"profile"` | Memory search mode |
| `addMemory` | `"always" \| "never"` | `"never"` | Auto-save conversations |
| `threadId` | `string` | - | Conversation ID for grouping messages |
| `addMemory` | `"always" \| "never"` | `"always"` | Auto-save conversations |
| `verbose` | `boolean` | `false` | Enable debug logging |
| `promptTemplate` | `function` | - | Custom memory formatting |
@ -111,19 +111,31 @@ sequenceDiagram
**Profile Mode (Default)** - Retrieves the user's complete profile without query-based filtering:
```typescript
const agent = new Agent(withSupermemory(config, "user-123", { mode: "profile" }))
const agent = new Agent(withSupermemory(config, {
containerTag: "user-123",
customId: "conv-456",
mode: "profile",
}))
```
**Query Mode** - Searches memories based on the user's message:
```typescript
const agent = new Agent(withSupermemory(config, "user-123", { mode: "query" }))
const agent = new Agent(withSupermemory(config, {
containerTag: "user-123",
customId: "conv-456",
mode: "query",
}))
```
**Full Mode** - Combines profile AND query-based search for maximum context:
```typescript
const agent = new Agent(withSupermemory(config, "user-123", { mode: "full" }))
const agent = new Agent(withSupermemory(config, {
containerTag: "user-123",
customId: "conv-456",
mode: "full",
}))
### Mode Comparison
@ -137,26 +149,34 @@ const agent = new Agent(withSupermemory(config, "user-123", { mode: "full" }))
## Saving Conversations
Enable automatic conversation saving with `addMemory: "always"`. A `threadId` is required to group messages:
Conversation saving is enabled by default (`addMemory: "always"`). Messages are grouped using the required `customId`:
```typescript
const agent = new Agent(withSupermemory(
{ id: "my-assistant", model: openai("gpt-4o"), instructions: "..." },
"user-123",
{
addMemory: "always",
threadId: "conv-456",
containerTag: "user-123",
customId: "conv-456", // Required: groups messages for contextual memory
}
))
// All messages in this conversation are saved
// All messages in this conversation are saved automatically
await agent.generate("I prefer TypeScript over JavaScript")
await agent.generate("My favorite framework is Next.js")
```
<Warning>
Without a `threadId`, the output processor will log a warning and skip saving. Always provide a `threadId` when using `addMemory: "always"`.
</Warning>
To disable automatic saving:
```typescript
const agent = new Agent(withSupermemory(
{ id: "my-assistant", model: openai("gpt-4o"), instructions: "..." },
{
containerTag: "user-123",
customId: "conv-456",
addMemory: "never", // Only retrieve memories, don't save
}
))
```
---
@ -182,8 +202,9 @@ const claudePrompt = (data: MemoryPromptData) => `
const agent = new Agent(withSupermemory(
{ id: "my-assistant", model: openai("gpt-4o"), instructions: "..." },
"user-123",
{
containerTag: "user-123",
customId: "conv-456",
mode: "full",
promptTemplate: claudePrompt,
}
@ -210,8 +231,11 @@ const agent = new Agent({
name: "My Assistant",
model: openai("gpt-4o"),
inputProcessors: [
createSupermemoryProcessor("user-123", {
createSupermemoryProcessor({
containerTag: "user-123",
customId: "conv-456",
mode: "full",
addMemory: "never",
verbose: true,
}),
],
@ -232,9 +256,9 @@ const agent = new Agent({
name: "My Assistant",
model: openai("gpt-4o"),
outputProcessors: [
createSupermemoryOutputProcessor("user-123", {
addMemory: "always",
threadId: "conv-456",
createSupermemoryOutputProcessor({
containerTag: "user-123",
customId: "conv-456",
}),
],
})
@ -249,10 +273,10 @@ import { Agent } from "@mastra/core/agent"
import { createSupermemoryProcessors } from "@supermemory/tools/mastra"
import { openai } from "@ai-sdk/openai"
const { input, output } = createSupermemoryProcessors("user-123", {
const { input, output } = createSupermemoryProcessors({
containerTag: "user-123",
customId: "conv-456",
mode: "full",
addMemory: "always",
threadId: "conv-456",
verbose: true,
})
@ -267,9 +291,9 @@ const agent = new Agent({
---
## Using RequestContext
## Using RequestContext for Dynamic Thread IDs
Mastra's `RequestContext` can provide `threadId` dynamically:
For server setups where one agent instance handles multiple concurrent conversations, use Mastra's `RequestContext` to provide per-request thread IDs. **RequestContext takes precedence** over the construction-time `customId`:
```typescript
import { Agent } from "@mastra/core/agent"
@ -279,21 +303,25 @@ import { openai } from "@ai-sdk/openai"
const agent = new Agent(withSupermemory(
{ id: "my-assistant", model: openai("gpt-4o"), instructions: "..." },
"user-123",
{
containerTag: "user-123",
customId: "fallback-conv", // Used only when RequestContext doesn't provide a threadId
mode: "full",
addMemory: "always",
// threadId not set - will use RequestContext
}
))
// Set threadId dynamically via RequestContext
// Per-request threadId takes precedence over customId
const ctx = new RequestContext()
ctx.set(MASTRA_THREAD_ID_KEY, "dynamic-thread-id")
ctx.set(MASTRA_THREAD_ID_KEY, "user-456-session-789")
await agent.generate("Hello!", { requestContext: ctx })
// This conversation is stored under "user-456-session-789", not "fallback-conv"
```
<Note>
**Server-side usage**: Always use `RequestContext` to pass unique conversation IDs per request. Using a fixed `customId` for all requests will merge conversations from different users.
</Note>
---
## Verbose Logging
@ -303,8 +331,11 @@ Enable detailed logging for debugging:
```typescript
const agent = new Agent(withSupermemory(
{ id: "my-assistant", model: openai("gpt-4o"), instructions: "..." },
"user-123",
{ verbose: true }
{
containerTag: "user-123",
customId: "conv-456",
verbose: true,
}
))
// Console output:
@ -330,7 +361,10 @@ const agent = new Agent(withSupermemory(
inputProcessors: [myLoggingProcessor],
outputProcessors: [myAnalyticsProcessor],
},
"user-123"
{
containerTag: "user-123",
customId: "conv-456",
}
))
```
@ -345,15 +379,13 @@ Enhances a Mastra agent config with memory capabilities.
```typescript
function withSupermemory<T extends AgentConfig>(
config: T,
containerTag: string,
options?: SupermemoryMastraOptions
options: SupermemoryMastraOptions
): T
```
**Parameters:**
- `config` - The Mastra agent configuration object
- `containerTag` - User/container ID for scoping memories
- `options` - Configuration options
- `options` - Configuration options (includes required `containerTag` and `customId`)
**Returns:** Enhanced config with Supermemory processors injected
@ -363,8 +395,7 @@ Creates an input processor for memory injection.
```typescript
function createSupermemoryProcessor(
containerTag: string,
options?: SupermemoryMastraOptions
options: SupermemoryMastraOptions
): SupermemoryInputProcessor
```
@ -374,8 +405,7 @@ Creates an output processor for conversation saving.
```typescript
function createSupermemoryOutputProcessor(
containerTag: string,
options?: SupermemoryMastraOptions
options: SupermemoryMastraOptions
): SupermemoryOutputProcessor
```
@ -385,8 +415,7 @@ Creates both processors with shared configuration.
```typescript
function createSupermemoryProcessors(
containerTag: string,
options?: SupermemoryMastraOptions
options: SupermemoryMastraOptions
): {
input: SupermemoryInputProcessor
output: SupermemoryOutputProcessor
@ -397,11 +426,12 @@ function createSupermemoryProcessors(
```typescript
interface SupermemoryMastraOptions {
containerTag: string // Required: User/container tag for scoping memories
customId: string // Required: Groups messages for contextual memory generation
apiKey?: string
baseUrl?: string
mode?: "profile" | "query" | "full"
addMemory?: "always" | "never"
threadId?: string
addMemory?: "always" | "never" // Default: "always"
verbose?: boolean
promptTemplate?: (data: MemoryPromptData) => string
}
@ -423,14 +453,16 @@ Processors gracefully handle errors without breaking the agent:
- **API errors** - Logged and skipped; agent continues without memories
- **Missing API key** - Throws immediately with helpful error message
- **Missing threadId** - Warns in console; skips saving
```typescript
// Missing API key throws immediately
const agent = new Agent(withSupermemory(
{ id: "my-assistant", model: openai("gpt-4o"), instructions: "..." },
"user-123",
{ apiKey: undefined } // Will check SUPERMEMORY_API_KEY env
{
containerTag: "user-123",
customId: "conv-456",
apiKey: undefined, // Will check SUPERMEMORY_API_KEY env
}
))
// Error: SUPERMEMORY_API_KEY is not set
```

View file

@ -414,7 +414,7 @@ const addResult = await tools.addMemory({
Add persistent memory to [Mastra](https://mastra.ai) AI agents. The integration provides processors that:
- **Input Processor**: Fetches relevant memories and injects them into the system prompt before LLM calls
- **Output Processor**: Optionally saves conversations to Supermemory after responses
- **Output Processor**: Saves conversations to Supermemory after responses (enabled by default)
#### Quick Start with `withSupermemory` Wrapper
@ -433,11 +433,10 @@ const agent = new Agent(withSupermemory(
model: openai("gpt-4o"),
instructions: "You are a helpful assistant.",
},
"user-123", // containerTag - scopes memories to this user
{
containerTag: "user-123", // Required: scopes memories to this user
customId: "conv-456", // Required: groups messages for contextual memory
mode: "full",
addMemory: "always",
threadId: "conv-456",
}
))
@ -454,10 +453,10 @@ import { Agent } from "@mastra/core/agent"
import { createSupermemoryProcessors } from "@supermemory/tools/mastra"
import { openai } from "@ai-sdk/openai"
const { input, output } = createSupermemoryProcessors("user-123", {
const { input, output } = createSupermemoryProcessors({
containerTag: "user-123",
customId: "conv-456",
mode: "full",
addMemory: "always",
threadId: "conv-456",
verbose: true, // Enable logging
})
@ -484,12 +483,12 @@ import { openai } from "@ai-sdk/openai"
async function main() {
const userId = "user-alex-123"
const threadId = `thread-${Date.now()}`
const customId = `thread-${Date.now()}`
const { input, output } = createSupermemoryProcessors(userId, {
const { input, output } = createSupermemoryProcessors({
containerTag: userId,
customId,
mode: "profile", // Fetch user profile memories
addMemory: "always", // Save all conversations
threadId,
verbose: true,
})
@ -525,13 +524,25 @@ main()
```typescript
// Profile mode - good for general personalization
const { input } = createSupermemoryProcessors("user-123", { mode: "profile" })
const { input } = createSupermemoryProcessors({
containerTag: "user-123",
customId: "conv-456",
mode: "profile",
})
// Query mode - good for specific lookups
const { input } = createSupermemoryProcessors("user-123", { mode: "query" })
const { input } = createSupermemoryProcessors({
containerTag: "user-123",
customId: "conv-456",
mode: "query",
})
// Full mode - comprehensive context
const { input } = createSupermemoryProcessors("user-123", { mode: "full" })
const { input } = createSupermemoryProcessors({
containerTag: "user-123",
customId: "conv-456",
mode: "full",
})
```
#### Custom Prompt Templates
@ -548,7 +559,9 @@ ${data.generalSearchMemories}
</user_context>
`.trim()
const { input, output } = createSupermemoryProcessors("user-123", {
const { input, output } = createSupermemoryProcessors({
containerTag: "user-123",
customId: "conv-456",
mode: "full",
promptTemplate: customTemplate,
})
@ -556,17 +569,17 @@ const { input, output } = createSupermemoryProcessors("user-123", {
#### Using RequestContext for Dynamic Thread IDs
Instead of hardcoding `threadId`, use Mastra's RequestContext for dynamic values:
For server setups where one agent instance handles multiple concurrent conversations, use Mastra's `RequestContext` to provide per-request thread IDs. **RequestContext takes precedence** over the construction-time `customId`:
```typescript
import { Agent } from "@mastra/core/agent"
import { RequestContext, MASTRA_THREAD_ID_KEY } from "@mastra/core/request-context"
import { createSupermemoryProcessors } from "@supermemory/tools/mastra"
const { input, output } = createSupermemoryProcessors("user-123", {
const { input, output } = createSupermemoryProcessors({
containerTag: "user-123",
customId: "fallback-conv", // Used only when RequestContext doesn't provide a threadId
mode: "profile",
addMemory: "always",
// threadId not set here - will be read from RequestContext
})
const agent = new Agent({
@ -577,22 +590,26 @@ const agent = new Agent({
outputProcessors: [output],
})
// Set threadId dynamically per request
// Per-request threadId takes precedence over customId
const ctx = new RequestContext()
ctx.set(MASTRA_THREAD_ID_KEY, "dynamic-thread-123")
ctx.set(MASTRA_THREAD_ID_KEY, "user-456-session-789")
const response = await agent.generate("Hello!", { requestContext: ctx })
// This conversation is stored under "user-456-session-789", not "fallback-conv"
```
> **Server-side usage**: Always use `RequestContext` to pass unique conversation IDs per request. Using a fixed `customId` for all requests will merge conversations from different users.
#### Mastra Configuration Options
```typescript
interface SupermemoryMastraOptions {
containerTag: string // Required: User/container tag for scoping memories
customId: string // Required: Groups messages into a single document for contextual memory
apiKey?: string // Supermemory API key (or use SUPERMEMORY_API_KEY env var)
baseUrl?: string // Custom API endpoint
mode?: "profile" | "query" | "full" // Memory search mode (default: "profile")
addMemory?: "always" | "never" // Auto-save conversations (default: "never")
threadId?: string // Conversation ID for grouping messages
addMemory?: "always" | "never" // Auto-save conversations (default: "always")
verbose?: boolean // Enable debug logging (default: false)
promptTemplate?: (data: MemoryPromptData) => string // Custom memory formatting
}

View file

@ -43,11 +43,11 @@ import type {
*/
interface ProcessorContext {
containerTag: string
customId: string
apiKey: string
baseUrl: string
mode: MemoryMode
addMemory: "always" | "never"
threadId?: string
logger: Logger
promptTemplate?: PromptTemplate
memoryCache: MemoryCache<string>
@ -57,20 +57,19 @@ interface ProcessorContext {
* Creates the shared processor context from options.
*/
function createProcessorContext(
containerTag: string,
options: SupermemoryMastraOptions = {},
options: SupermemoryMastraOptions,
): ProcessorContext {
const apiKey = validateApiKey(options.apiKey)
const baseUrl = normalizeBaseUrl(options.baseUrl)
const logger = createLogger(options.verbose ?? false)
return {
containerTag,
containerTag: options.containerTag,
customId: options.customId,
apiKey,
baseUrl,
mode: options.mode ?? "profile",
addMemory: options.addMemory ?? "never",
threadId: options.threadId,
addMemory: options.addMemory ?? "always",
logger,
promptTemplate: options.promptTemplate,
memoryCache: new MemoryCache<string>(),
@ -78,19 +77,24 @@ function createProcessorContext(
}
/**
* Gets the effective threadId from options or RequestContext.
* Gets the effective customId from RequestContext (if provided) or falls back to context.
* Per-request thread ID takes precedence to support dynamic per-conversation IDs in server setups.
*/
function getEffectiveThreadId(
function getEffectiveCustomId(
ctx: ProcessorContext,
requestContext?: RequestContext,
): string | undefined {
if (ctx.threadId) {
return ctx.threadId
}
): string {
// Per-request thread ID takes precedence over construction-time customId
if (requestContext) {
return requestContext.get(MASTRA_THREAD_ID_KEY) as string | undefined
const threadId = requestContext.get(MASTRA_THREAD_ID_KEY) as
| string
| undefined
if (threadId) {
return threadId
}
}
return undefined
// Fall back to construction-time customId
return ctx.customId
}
/**
@ -111,7 +115,9 @@ function getEffectiveThreadId(
* name: "My Agent",
* model: openai("gpt-4o"),
* inputProcessors: [
* new SupermemoryInputProcessor("user-123", {
* new SupermemoryInputProcessor({
* containerTag: "user-123",
* customId: "conv-456",
* mode: "full",
* verbose: true,
* }),
@ -125,8 +131,8 @@ export class SupermemoryInputProcessor implements Processor {
private ctx: ProcessorContext
constructor(containerTag: string, options: SupermemoryMastraOptions = {}) {
this.ctx = createProcessorContext(containerTag, options)
constructor(options: SupermemoryMastraOptions) {
this.ctx = createProcessorContext(options)
}
async processInput(args: ProcessInputArgs): Promise<ProcessInputResult> {
@ -146,7 +152,7 @@ export class SupermemoryInputProcessor implements Processor {
return messageList
}
const effectiveThreadId = getEffectiveThreadId(this.ctx, requestContext)
const effectiveThreadId = getEffectiveCustomId(this.ctx, requestContext)
const turnKey = MemoryCache.makeTurnKey(
this.ctx.containerTag,
effectiveThreadId,
@ -213,9 +219,10 @@ export class SupermemoryInputProcessor implements Processor {
* name: "My Agent",
* model: openai("gpt-4o"),
* outputProcessors: [
* new SupermemoryOutputProcessor("user-123", {
* new SupermemoryOutputProcessor({
* containerTag: "user-123",
* customId: "conv-456",
* addMemory: "always",
* threadId: "conv-456",
* }),
* ],
* })
@ -227,26 +234,20 @@ export class SupermemoryOutputProcessor implements Processor {
private ctx: ProcessorContext
constructor(containerTag: string, options: SupermemoryMastraOptions = {}) {
this.ctx = createProcessorContext(containerTag, options)
constructor(options: SupermemoryMastraOptions) {
this.ctx = createProcessorContext(options)
}
async processOutputResult(
args: ProcessOutputResultArgs,
): Promise<MastraDBMessage[]> {
const { messages, messageList, requestContext } = args
const { messages, requestContext } = args
if (this.ctx.addMemory !== "always") {
return messages
}
const effectiveThreadId = getEffectiveThreadId(this.ctx, requestContext)
if (!effectiveThreadId) {
this.ctx.logger.warn(
"No threadId provided for conversation save. Provide via options.threadId or RequestContext.",
)
return messages
}
const effectiveCustomId = getEffectiveCustomId(this.ctx, requestContext)
try {
const conversationMessages = this.convertToConversationMessages(messages)
@ -257,7 +258,7 @@ export class SupermemoryOutputProcessor implements Processor {
}
const response = await addConversation({
conversationId: effectiveThreadId,
conversationId: effectiveCustomId,
messages: conversationMessages,
containerTags: [this.ctx.containerTag],
apiKey: this.ctx.apiKey,
@ -266,7 +267,7 @@ export class SupermemoryOutputProcessor implements Processor {
this.ctx.logger.info("Conversation saved successfully", {
containerTag: this.ctx.containerTag,
conversationId: effectiveThreadId,
customId: effectiveCustomId,
messageCount: conversationMessages.length,
responseId: response.id,
})
@ -323,8 +324,7 @@ export class SupermemoryOutputProcessor implements Processor {
/**
* Creates a Supermemory input processor for memory injection.
*
* @param containerTag - The container tag/user ID for scoping memories
* @param options - Configuration options
* @param options - Configuration options including required containerTag and customId
* @returns Configured SupermemoryInputProcessor instance
*
* @example
@ -333,7 +333,9 @@ export class SupermemoryOutputProcessor implements Processor {
* import { createSupermemoryProcessor } from "@supermemory/tools/mastra"
* import { openai } from "@ai-sdk/openai"
*
* const processor = createSupermemoryProcessor("user-123", {
* const processor = createSupermemoryProcessor({
* containerTag: "user-123",
* customId: "conv-456",
* mode: "full",
* verbose: true,
* })
@ -347,17 +349,15 @@ export class SupermemoryOutputProcessor implements Processor {
* ```
*/
export function createSupermemoryProcessor(
containerTag: string,
options: SupermemoryMastraOptions = {},
options: SupermemoryMastraOptions,
): SupermemoryInputProcessor {
return new SupermemoryInputProcessor(containerTag, options)
return new SupermemoryInputProcessor(options)
}
/**
* Creates a Supermemory output processor for saving conversations.
*
* @param containerTag - The container tag/user ID for scoping memories
* @param options - Configuration options
* @param options - Configuration options including required containerTag and customId
* @returns Configured SupermemoryOutputProcessor instance
*
* @example
@ -366,9 +366,10 @@ export function createSupermemoryProcessor(
* import { createSupermemoryOutputProcessor } from "@supermemory/tools/mastra"
* import { openai } from "@ai-sdk/openai"
*
* const processor = createSupermemoryOutputProcessor("user-123", {
* const processor = createSupermemoryOutputProcessor({
* containerTag: "user-123",
* customId: "conv-456",
* addMemory: "always",
* threadId: "conv-456",
* })
*
* const agent = new Agent({
@ -380,10 +381,9 @@ export function createSupermemoryProcessor(
* ```
*/
export function createSupermemoryOutputProcessor(
containerTag: string,
options: SupermemoryMastraOptions = {},
options: SupermemoryMastraOptions,
): SupermemoryOutputProcessor {
return new SupermemoryOutputProcessor(containerTag, options)
return new SupermemoryOutputProcessor(options)
}
/**
@ -392,7 +392,6 @@ export function createSupermemoryOutputProcessor(
* Use this when you want both memory injection and conversation saving
* with consistent settings across both processors.
*
* @param containerTag - The container tag/user ID for scoping memories
* @param options - Configuration options shared by both processors
* @returns Object containing both input and output processors
*
@ -402,10 +401,11 @@ export function createSupermemoryOutputProcessor(
* import { createSupermemoryProcessors } from "@supermemory/tools/mastra"
* import { openai } from "@ai-sdk/openai"
*
* const { input, output } = createSupermemoryProcessors("user-123", {
* const { input, output } = createSupermemoryProcessors({
* containerTag: "user-123",
* customId: "conv-456",
* mode: "full",
* addMemory: "always",
* threadId: "conv-456",
* })
*
* const agent = new Agent({
@ -418,14 +418,13 @@ export function createSupermemoryOutputProcessor(
* ```
*/
export function createSupermemoryProcessors(
containerTag: string,
options: SupermemoryMastraOptions = {},
options: SupermemoryMastraOptions,
): {
input: SupermemoryInputProcessor
output: SupermemoryOutputProcessor
} {
return {
input: new SupermemoryInputProcessor(containerTag, options),
output: new SupermemoryOutputProcessor(containerTag, options),
input: new SupermemoryInputProcessor(options),
output: new SupermemoryOutputProcessor(options),
}
}

View file

@ -10,7 +10,6 @@ import type {
MemoryMode,
AddMemoryMode,
MemoryPromptData,
SupermemoryBaseOptions,
} from "../shared"
// Re-export Mastra core types for consumers
@ -34,14 +33,24 @@ export type { RequestContext } from "@mastra/core/request-context"
/**
* Configuration options for the Supermemory Mastra processor.
* Extends base options with Mastra-specific settings.
*/
export interface SupermemoryMastraOptions extends SupermemoryBaseOptions {
/**
* When using the output processor, set this to enable automatic conversation saving.
* The threadId is used to group messages into a single conversation.
*/
threadId?: string
export interface SupermemoryMastraOptions {
/** Container tag/user ID for scoping memories. Required. */
containerTag: string
/** Custom ID to group messages into a single document for contextual memory generation. Required. */
customId: string
/** Supermemory API key (falls back to SUPERMEMORY_API_KEY env var) */
apiKey?: string
/** Custom Supermemory API base URL */
baseUrl?: string
/** Memory retrieval mode */
mode?: MemoryMode
/** Memory persistence mode (default: "always") */
addMemory?: AddMemoryMode
/** Enable detailed logging of memory search and injection */
verbose?: boolean
/** Custom function to format memory data into the system prompt */
promptTemplate?: PromptTemplate
}
export type { PromptTemplate, MemoryMode, AddMemoryMode, MemoryPromptData }

View file

@ -34,11 +34,10 @@ interface AgentConfig {
*
* The enhanced config includes:
* - Input processor: Fetches relevant memories before LLM calls
* - Output processor: Optionally saves conversations after responses
* - Output processor: Saves conversations after responses (when addMemory is "always")
*
* @param config - The Mastra agent configuration to enhance
* @param containerTag - The container tag/user ID for scoping memories
* @param options - Configuration options for memory behavior
* @param options - Configuration options including required containerTag and customId
* @returns Enhanced agent config with Supermemory processors injected
*
* @example
@ -54,11 +53,11 @@ interface AgentConfig {
* model: openai("gpt-4o"),
* instructions: "You are a helpful assistant.",
* },
* "user-123",
* {
* containerTag: "user-123",
* customId: "conv-456",
* mode: "full",
* addMemory: "always",
* threadId: "conv-456",
* }
* )
*
@ -69,13 +68,25 @@ interface AgentConfig {
*/
export function withSupermemory<T extends AgentConfig>(
config: T,
containerTag: string,
options: SupermemoryMastraOptions = {},
options: SupermemoryMastraOptions,
): T {
// Runtime guard for breaking API change - catch old 3-arg signature usage
if (
typeof options !== "object" ||
options === null ||
!options.containerTag ||
!options.customId
) {
throw new Error(
"withSupermemory: options must be an object with required containerTag and customId fields. " +
"The API changed in v2.0.0 — see https://docs.supermemory.ai/integrations/mastra for the new signature.",
)
}
validateApiKey(options.apiKey)
const inputProcessor = new SupermemoryInputProcessor(containerTag, options)
const outputProcessor = new SupermemoryOutputProcessor(containerTag, options)
const inputProcessor = new SupermemoryInputProcessor(options)
const outputProcessor = new SupermemoryOutputProcessor(options)
const existingInputProcessors = config.inputProcessors ?? []
const existingOutputProcessors = config.outputProcessors ?? []

View file

@ -37,6 +37,7 @@ const INTEGRATION_CONFIG = {
apiKey: process.env.SUPERMEMORY_API_KEY || "",
baseUrl: process.env.SUPERMEMORY_BASE_URL || "https://api.supermemory.ai",
containerTag: "integration-test-mastra",
customId: "integration-test-conversation",
}
const shouldRunIntegration = !!process.env.SUPERMEMORY_API_KEY
@ -100,14 +101,13 @@ describe.skipIf(!shouldRunIntegration)(
() => {
describe("SupermemoryInputProcessor", () => {
it("should fetch real memories and inject into messageList", async () => {
const processor = new SupermemoryInputProcessor(
INTEGRATION_CONFIG.containerTag,
{
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
},
)
const processor = new SupermemoryInputProcessor({
containerTag: INTEGRATION_CONFIG.containerTag,
customId: INTEGRATION_CONFIG.customId,
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
})
const messageList = createIntegrationMessageList()
const messages: MastraDBMessage[] = [
@ -132,14 +132,13 @@ describe.skipIf(!shouldRunIntegration)(
it("should use query mode with user message as search query", async () => {
const fetchSpy = vi.spyOn(globalThis, "fetch")
const processor = new SupermemoryInputProcessor(
INTEGRATION_CONFIG.containerTag,
{
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "query",
},
)
const processor = new SupermemoryInputProcessor({
containerTag: INTEGRATION_CONFIG.containerTag,
customId: INTEGRATION_CONFIG.customId,
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "query",
})
const messageList = createIntegrationMessageList()
const args: ProcessInputArgs = {
@ -177,14 +176,13 @@ describe.skipIf(!shouldRunIntegration)(
it("should use full mode with both profile and query", async () => {
const fetchSpy = vi.spyOn(globalThis, "fetch")
const processor = new SupermemoryInputProcessor(
INTEGRATION_CONFIG.containerTag,
{
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "full",
},
)
const processor = new SupermemoryInputProcessor({
containerTag: INTEGRATION_CONFIG.containerTag,
customId: INTEGRATION_CONFIG.customId,
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "full",
})
const messageList = createIntegrationMessageList()
const args: ProcessInputArgs = {
@ -217,14 +215,13 @@ describe.skipIf(!shouldRunIntegration)(
it("should cache memories for repeated calls with same message", async () => {
const fetchSpy = vi.spyOn(globalThis, "fetch")
const processor = new SupermemoryInputProcessor(
INTEGRATION_CONFIG.containerTag,
{
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
},
)
const processor = new SupermemoryInputProcessor({
containerTag: INTEGRATION_CONFIG.containerTag,
customId: INTEGRATION_CONFIG.customId,
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
})
const messages: MastraDBMessage[] = [
createMessage("user", "Cache test message"),
@ -269,15 +266,14 @@ describe.skipIf(!shouldRunIntegration)(
generalSearchMemories: string
}) => `<mastra-memories>${data.userMemories}</mastra-memories>`
const processor = new SupermemoryInputProcessor(
INTEGRATION_CONFIG.containerTag,
{
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
promptTemplate: customTemplate,
},
)
const processor = new SupermemoryInputProcessor({
containerTag: INTEGRATION_CONFIG.containerTag,
customId: INTEGRATION_CONFIG.customId,
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
promptTemplate: customTemplate,
})
const messageList = createIntegrationMessageList()
const args: ProcessInputArgs = {
@ -299,17 +295,15 @@ describe.skipIf(!shouldRunIntegration)(
it("should save conversation when addMemory is always", async () => {
const fetchSpy = vi.spyOn(globalThis, "fetch")
const threadId = `test-mastra-${Date.now()}`
const customId = `test-mastra-${Date.now()}`
const processor = new SupermemoryOutputProcessor(
INTEGRATION_CONFIG.containerTag,
{
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
addMemory: "always",
threadId,
},
)
const processor = new SupermemoryOutputProcessor({
containerTag: INTEGRATION_CONFIG.containerTag,
customId,
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
addMemory: "always",
})
const args: ProcessOutputResultArgs = {
messages: [
@ -336,15 +330,13 @@ describe.skipIf(!shouldRunIntegration)(
it("should not save when addMemory is never", async () => {
const fetchSpy = vi.spyOn(globalThis, "fetch")
const processor = new SupermemoryOutputProcessor(
INTEGRATION_CONFIG.containerTag,
{
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
addMemory: "never",
threadId: "test-thread",
},
)
const processor = new SupermemoryOutputProcessor({
containerTag: INTEGRATION_CONFIG.containerTag,
customId: "test-thread",
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
addMemory: "never",
})
const args: ProcessOutputResultArgs = {
messages: [
@ -368,17 +360,16 @@ describe.skipIf(!shouldRunIntegration)(
fetchSpy.mockRestore()
})
it("should use threadId from RequestContext when not in options", async () => {
it("should use threadId from RequestContext when available", async () => {
const fetchSpy = vi.spyOn(globalThis, "fetch")
const processor = new SupermemoryOutputProcessor(
INTEGRATION_CONFIG.containerTag,
{
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
addMemory: "always",
},
)
const processor = new SupermemoryOutputProcessor({
containerTag: INTEGRATION_CONFIG.containerTag,
customId: INTEGRATION_CONFIG.customId,
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
addMemory: "always",
})
const contextThreadId = `context-thread-${Date.now()}`
const requestContext = new RequestContext()
@ -410,16 +401,14 @@ describe.skipIf(!shouldRunIntegration)(
describe("createSupermemoryProcessors", () => {
it("should create working input and output processors", async () => {
const { input, output } = createSupermemoryProcessors(
INTEGRATION_CONFIG.containerTag,
{
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
addMemory: "always",
threadId: `processors-test-${Date.now()}`,
},
)
const { input, output } = createSupermemoryProcessors({
containerTag: INTEGRATION_CONFIG.containerTag,
customId: `processors-test-${Date.now()}`,
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
addMemory: "always",
})
const messageList = createIntegrationMessageList()
const inputArgs: ProcessInputArgs = {
@ -455,17 +444,14 @@ describe.skipIf(!shouldRunIntegration)(
model: "gpt-4o",
}
const enhanced = withSupermemory(
config,
INTEGRATION_CONFIG.containerTag,
{
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
addMemory: "always",
threadId: `wrapper-test-${Date.now()}`,
},
)
const enhanced = withSupermemory(config, {
containerTag: INTEGRATION_CONFIG.containerTag,
customId: `wrapper-test-${Date.now()}`,
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
addMemory: "always",
})
expect(enhanced.id).toBe("test-mastra-agent")
expect(enhanced.name).toBe("Test Mastra Agent")
@ -511,15 +497,13 @@ describe.skipIf(!shouldRunIntegration)(
outputProcessors: [existingOutputProcessor],
}
const enhanced = withSupermemory(
config,
INTEGRATION_CONFIG.containerTag,
{
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
},
)
const enhanced = withSupermemory(config, {
containerTag: INTEGRATION_CONFIG.containerTag,
customId: INTEGRATION_CONFIG.customId,
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
})
expect(enhanced.inputProcessors).toHaveLength(2)
expect(enhanced.outputProcessors).toHaveLength(2)
@ -534,15 +518,14 @@ describe.skipIf(!shouldRunIntegration)(
describe("Options", () => {
it("verbose mode should not break functionality", async () => {
const processor = new SupermemoryInputProcessor(
INTEGRATION_CONFIG.containerTag,
{
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
verbose: true,
},
)
const processor = new SupermemoryInputProcessor({
containerTag: INTEGRATION_CONFIG.containerTag,
customId: INTEGRATION_CONFIG.customId,
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
verbose: true,
})
const messageList = createIntegrationMessageList()
const args: ProcessInputArgs = {
@ -561,14 +544,13 @@ describe.skipIf(!shouldRunIntegration)(
it("custom baseUrl should be used for API calls", async () => {
const fetchSpy = vi.spyOn(globalThis, "fetch")
const processor = new SupermemoryInputProcessor(
INTEGRATION_CONFIG.containerTag,
{
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
},
)
const processor = new SupermemoryInputProcessor({
containerTag: INTEGRATION_CONFIG.containerTag,
customId: INTEGRATION_CONFIG.customId,
apiKey: INTEGRATION_CONFIG.apiKey,
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
})
const args: ProcessInputArgs = {
messages: [createMessage("user", "Base URL test")],
@ -595,14 +577,13 @@ describe.skipIf(!shouldRunIntegration)(
describe("Error handling", () => {
it("should handle invalid API key gracefully", async () => {
const processor = new SupermemoryInputProcessor(
INTEGRATION_CONFIG.containerTag,
{
apiKey: "invalid-api-key-12345",
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
},
)
const processor = new SupermemoryInputProcessor({
containerTag: INTEGRATION_CONFIG.containerTag,
customId: INTEGRATION_CONFIG.customId,
apiKey: "invalid-api-key-12345",
baseUrl: INTEGRATION_CONFIG.baseUrl,
mode: "profile",
})
const messageList = createIntegrationMessageList()
const args: ProcessInputArgs = {
@ -619,15 +600,13 @@ describe.skipIf(!shouldRunIntegration)(
})
it("output processor should handle save errors gracefully", async () => {
const processor = new SupermemoryOutputProcessor(
INTEGRATION_CONFIG.containerTag,
{
apiKey: "invalid-api-key-12345",
baseUrl: INTEGRATION_CONFIG.baseUrl,
addMemory: "always",
threadId: "error-test",
},
)
const processor = new SupermemoryOutputProcessor({
containerTag: INTEGRATION_CONFIG.containerTag,
customId: "error-test",
apiKey: "invalid-api-key-12345",
baseUrl: INTEGRATION_CONFIG.baseUrl,
addMemory: "always",
})
const args: ProcessOutputResultArgs = {
messages: [

View file

@ -29,6 +29,7 @@ const TEST_CONFIG = {
apiKey: "test-api-key",
baseUrl: "https://api.supermemory.ai",
containerTag: "test-mastra-user",
customId: "test-conversation",
}
interface MockAgentConfig {
@ -128,8 +129,11 @@ describe("SupermemoryInputProcessor", () => {
})
describe("constructor", () => {
it("should create processor with default options", () => {
const processor = new SupermemoryInputProcessor(TEST_CONFIG.containerTag)
it("should create processor with required options", () => {
const processor = new SupermemoryInputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
})
expect(processor.id).toBe("supermemory-input")
expect(processor.name).toBe("Supermemory Memory Injection")
})
@ -138,19 +142,21 @@ describe("SupermemoryInputProcessor", () => {
delete process.env.SUPERMEMORY_API_KEY
expect(() => {
new SupermemoryInputProcessor(TEST_CONFIG.containerTag)
new SupermemoryInputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
})
}).toThrow("SUPERMEMORY_API_KEY is not set")
})
it("should accept API key via options", () => {
delete process.env.SUPERMEMORY_API_KEY
const processor = new SupermemoryInputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: "custom-key",
},
)
const processor = new SupermemoryInputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
apiKey: "custom-key",
})
expect(processor.id).toBe("supermemory-input")
})
})
@ -168,13 +174,12 @@ describe("SupermemoryInputProcessor", () => {
),
})
const processor = new SupermemoryInputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: TEST_CONFIG.apiKey,
mode: "profile",
},
)
const processor = new SupermemoryInputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
apiKey: TEST_CONFIG.apiKey,
mode: "profile",
})
const messageList = createMockMessageList()
const messages: MastraDBMessage[] = [createMessage("user", "Hello")]
@ -203,13 +208,12 @@ describe("SupermemoryInputProcessor", () => {
Promise.resolve(createMockProfileResponse(["Cached memory"])),
})
const processor = new SupermemoryInputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: TEST_CONFIG.apiKey,
mode: "profile",
},
)
const processor = new SupermemoryInputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
apiKey: TEST_CONFIG.apiKey,
mode: "profile",
})
const messages: MastraDBMessage[] = [createMessage("user", "Hello")]
@ -249,13 +253,12 @@ describe("SupermemoryInputProcessor", () => {
})
})
const processor = new SupermemoryInputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: TEST_CONFIG.apiKey,
mode: "query",
},
)
const processor = new SupermemoryInputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
apiKey: TEST_CONFIG.apiKey,
mode: "query",
})
const args1: ProcessInputArgs = {
messages: [createMessage("user", "First message")],
@ -281,13 +284,12 @@ describe("SupermemoryInputProcessor", () => {
})
it("should return messageList in query mode when no user message", async () => {
const processor = new SupermemoryInputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: TEST_CONFIG.apiKey,
mode: "query",
},
)
const processor = new SupermemoryInputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
apiKey: TEST_CONFIG.apiKey,
mode: "query",
})
const messageList = createMockMessageList()
const args: ProcessInputArgs = {
@ -313,13 +315,12 @@ describe("SupermemoryInputProcessor", () => {
text: () => Promise.resolve("Server error"),
})
const processor = new SupermemoryInputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: TEST_CONFIG.apiKey,
mode: "profile",
},
)
const processor = new SupermemoryInputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
apiKey: TEST_CONFIG.apiKey,
mode: "profile",
})
const messageList = createMockMessageList()
const args: ProcessInputArgs = {
@ -342,14 +343,12 @@ describe("SupermemoryInputProcessor", () => {
json: () => Promise.resolve(createMockProfileResponse(["Memory"])),
})
const processor = new SupermemoryInputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: TEST_CONFIG.apiKey,
threadId: "thread-123",
mode: "profile",
},
)
const processor = new SupermemoryInputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: "thread-123",
apiKey: TEST_CONFIG.apiKey,
mode: "profile",
})
const args: ProcessInputArgs = {
messages: [createMessage("user", "Hello")],
@ -370,13 +369,12 @@ describe("SupermemoryInputProcessor", () => {
json: () => Promise.resolve(createMockProfileResponse(["Memory"])),
})
const processor = new SupermemoryInputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: TEST_CONFIG.apiKey,
mode: "profile",
},
)
const processor = new SupermemoryInputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
apiKey: TEST_CONFIG.apiKey,
mode: "profile",
})
const requestContext = new RequestContext()
requestContext.set(MASTRA_THREAD_ID_KEY, "ctx-thread-456")
@ -401,13 +399,12 @@ describe("SupermemoryInputProcessor", () => {
json: () => Promise.resolve(createMockProfileResponse(["Memory"])),
})
const processor = new SupermemoryInputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: TEST_CONFIG.apiKey,
mode: "query",
},
)
const processor = new SupermemoryInputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
apiKey: TEST_CONFIG.apiKey,
mode: "query",
})
const messages: MastraDBMessage[] = [
{
@ -464,8 +461,11 @@ describe("SupermemoryOutputProcessor", () => {
})
describe("constructor", () => {
it("should create processor with default options", () => {
const processor = new SupermemoryOutputProcessor(TEST_CONFIG.containerTag)
it("should create processor with required options", () => {
const processor = new SupermemoryOutputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
})
expect(processor.id).toBe("supermemory-output")
expect(processor.name).toBe("Supermemory Conversation Save")
})
@ -478,14 +478,12 @@ describe("SupermemoryOutputProcessor", () => {
json: () => Promise.resolve(createMockConversationResponse()),
})
const processor = new SupermemoryOutputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: TEST_CONFIG.apiKey,
addMemory: "always",
threadId: "conv-456",
},
)
const processor = new SupermemoryOutputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: "conv-456",
apiKey: TEST_CONFIG.apiKey,
addMemory: "always",
})
const messages: MastraDBMessage[] = [
createMessage("user", "Hello"),
@ -522,14 +520,12 @@ describe("SupermemoryOutputProcessor", () => {
})
it("should not save conversation when addMemory is never", async () => {
const processor = new SupermemoryOutputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: TEST_CONFIG.apiKey,
addMemory: "never",
threadId: "conv-456",
},
)
const processor = new SupermemoryOutputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: "conv-456",
apiKey: TEST_CONFIG.apiKey,
addMemory: "never",
})
const args: ProcessOutputResultArgs = {
messages: [
@ -546,43 +542,50 @@ describe("SupermemoryOutputProcessor", () => {
expect(fetchMock).not.toHaveBeenCalled()
})
it("should not save when no threadId provided", async () => {
const processor = new SupermemoryOutputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: TEST_CONFIG.apiKey,
addMemory: "always",
},
)
const args: ProcessOutputResultArgs = {
messages: [
createMessage("user", "Hello"),
createMessage("assistant", "Hi!"),
],
messageList: createMockMessageList(),
abort: vi.fn() as never,
retryCount: 0,
}
await processor.processOutputResult(args)
expect(fetchMock).not.toHaveBeenCalled()
})
it("should use threadId from requestContext", async () => {
it("should use customId from options for conversation save", async () => {
fetchMock.mockResolvedValue({
ok: true,
json: () => Promise.resolve(createMockConversationResponse()),
})
const processor = new SupermemoryOutputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: TEST_CONFIG.apiKey,
addMemory: "always",
},
const processor = new SupermemoryOutputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: "my-custom-id",
apiKey: TEST_CONFIG.apiKey,
addMemory: "always",
})
const args: ProcessOutputResultArgs = {
messages: [
createMessage("user", "Hello"),
createMessage("assistant", "Hi!"),
],
messageList: createMockMessageList(),
abort: vi.fn() as never,
retryCount: 0,
}
await processor.processOutputResult(args)
expect(fetchMock).toHaveBeenCalledTimes(1)
const callBody = JSON.parse(
(fetchMock.mock.calls[0]?.[1] as { body: string }).body,
)
expect(callBody.conversationId).toBe("my-custom-id")
})
it("should use threadId from requestContext (takes precedence over customId)", async () => {
fetchMock.mockResolvedValue({
ok: true,
json: () => Promise.resolve(createMockConversationResponse()),
})
const processor = new SupermemoryOutputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: "fallback-custom-id",
apiKey: TEST_CONFIG.apiKey,
addMemory: "always",
})
const requestContext = new RequestContext()
requestContext.set(MASTRA_THREAD_ID_KEY, "ctx-thread-789")
@ -604,23 +607,55 @@ describe("SupermemoryOutputProcessor", () => {
const callBody = JSON.parse(
(fetchMock.mock.calls[0]?.[1] as { body: string }).body,
)
// RequestContext threadId takes precedence for per-request dynamic IDs
expect(callBody.conversationId).toBe("ctx-thread-789")
})
it("should fall back to customId when requestContext has no threadId", async () => {
fetchMock.mockResolvedValue({
ok: true,
json: () => Promise.resolve(createMockConversationResponse()),
})
const processor = new SupermemoryOutputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: "fallback-custom-id",
apiKey: TEST_CONFIG.apiKey,
addMemory: "always",
})
const args: ProcessOutputResultArgs = {
messages: [
createMessage("user", "Hello"),
createMessage("assistant", "Hi!"),
],
messageList: createMockMessageList(),
abort: vi.fn() as never,
retryCount: 0,
}
await processor.processOutputResult(args)
expect(fetchMock).toHaveBeenCalledTimes(1)
const callBody = JSON.parse(
(fetchMock.mock.calls[0]?.[1] as { body: string }).body,
)
// Falls back to customId when no RequestContext threadId
expect(callBody.conversationId).toBe("fallback-custom-id")
})
it("should skip system messages when saving", async () => {
fetchMock.mockResolvedValue({
ok: true,
json: () => Promise.resolve(createMockConversationResponse()),
})
const processor = new SupermemoryOutputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: TEST_CONFIG.apiKey,
addMemory: "always",
threadId: "conv-456",
},
)
const processor = new SupermemoryOutputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: "conv-456",
apiKey: TEST_CONFIG.apiKey,
addMemory: "always",
})
const messages: MastraDBMessage[] = [
createMessage("system", "You are a helpful assistant"),
@ -652,14 +687,12 @@ describe("SupermemoryOutputProcessor", () => {
json: () => Promise.resolve(createMockConversationResponse()),
})
const processor = new SupermemoryOutputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: TEST_CONFIG.apiKey,
addMemory: "always",
threadId: "conv-456",
},
)
const processor = new SupermemoryOutputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: "conv-456",
apiKey: TEST_CONFIG.apiKey,
addMemory: "always",
})
const messages: MastraDBMessage[] = [
{
@ -708,14 +741,12 @@ describe("SupermemoryOutputProcessor", () => {
text: () => Promise.resolve("Server error"),
})
const processor = new SupermemoryOutputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: TEST_CONFIG.apiKey,
addMemory: "always",
threadId: "conv-456",
},
)
const processor = new SupermemoryOutputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: "conv-456",
apiKey: TEST_CONFIG.apiKey,
addMemory: "always",
})
const args: ProcessOutputResultArgs = {
messages: [
@ -732,14 +763,12 @@ describe("SupermemoryOutputProcessor", () => {
})
it("should not save when no messages to save", async () => {
const processor = new SupermemoryOutputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: TEST_CONFIG.apiKey,
addMemory: "always",
threadId: "conv-456",
},
)
const processor = new SupermemoryOutputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: "conv-456",
apiKey: TEST_CONFIG.apiKey,
addMemory: "always",
})
const args: ProcessOutputResultArgs = {
messages: [],
@ -773,13 +802,18 @@ describe("Factory functions", () => {
describe("createSupermemoryProcessor", () => {
it("should create input processor", () => {
const processor = createSupermemoryProcessor(TEST_CONFIG.containerTag)
const processor = createSupermemoryProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
})
expect(processor).toBeInstanceOf(SupermemoryInputProcessor)
expect(processor.id).toBe("supermemory-input")
})
it("should pass options to processor", () => {
const processor = createSupermemoryProcessor(TEST_CONFIG.containerTag, {
const processor = createSupermemoryProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
apiKey: "custom-key",
mode: "full",
})
@ -789,45 +823,43 @@ describe("Factory functions", () => {
describe("createSupermemoryOutputProcessor", () => {
it("should create output processor", () => {
const processor = createSupermemoryOutputProcessor(
TEST_CONFIG.containerTag,
)
const processor = createSupermemoryOutputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
})
expect(processor).toBeInstanceOf(SupermemoryOutputProcessor)
expect(processor.id).toBe("supermemory-output")
})
it("should pass options to processor", () => {
const processor = createSupermemoryOutputProcessor(
TEST_CONFIG.containerTag,
{
apiKey: "custom-key",
addMemory: "always",
threadId: "conv-123",
},
)
const processor = createSupermemoryOutputProcessor({
containerTag: TEST_CONFIG.containerTag,
customId: "conv-123",
apiKey: "custom-key",
addMemory: "always",
})
expect(processor).toBeInstanceOf(SupermemoryOutputProcessor)
})
})
describe("createSupermemoryProcessors", () => {
it("should create both input and output processors", () => {
const { input, output } = createSupermemoryProcessors(
TEST_CONFIG.containerTag,
)
const { input, output } = createSupermemoryProcessors({
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
})
expect(input).toBeInstanceOf(SupermemoryInputProcessor)
expect(output).toBeInstanceOf(SupermemoryOutputProcessor)
})
it("should share options between processors", () => {
const { input, output } = createSupermemoryProcessors(
TEST_CONFIG.containerTag,
{
apiKey: "custom-key",
mode: "full",
addMemory: "always",
threadId: "conv-123",
},
)
const { input, output } = createSupermemoryProcessors({
containerTag: TEST_CONFIG.containerTag,
customId: "conv-123",
apiKey: "custom-key",
mode: "full",
addMemory: "always",
})
expect(input.id).toBe("supermemory-input")
expect(output.id).toBe("supermemory-output")
})
@ -857,7 +889,10 @@ describe("withSupermemory", () => {
const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" }
expect(() => {
withSupermemory(config, TEST_CONFIG.containerTag)
withSupermemory(config, {
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
})
}).toThrow("SUPERMEMORY_API_KEY is not set")
})
@ -865,7 +900,9 @@ describe("withSupermemory", () => {
delete process.env.SUPERMEMORY_API_KEY
const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" }
const enhanced = withSupermemory(config, TEST_CONFIG.containerTag, {
const enhanced = withSupermemory(config, {
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
apiKey: "custom-key",
})
@ -877,7 +914,10 @@ describe("withSupermemory", () => {
describe("processor injection", () => {
it("should inject input and output processors", () => {
const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" }
const enhanced = withSupermemory(config, TEST_CONFIG.containerTag)
const enhanced = withSupermemory(config, {
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
})
expect(enhanced.inputProcessors).toHaveLength(1)
expect(enhanced.outputProcessors).toHaveLength(1)
@ -892,7 +932,10 @@ describe("withSupermemory", () => {
model: "gpt-4",
customProp: "value",
}
const enhanced = withSupermemory(config, TEST_CONFIG.containerTag)
const enhanced = withSupermemory(config, {
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
})
expect(enhanced.id).toBe("test-agent")
expect(enhanced.name).toBe("Test Agent")
@ -911,7 +954,10 @@ describe("withSupermemory", () => {
inputProcessors: [existingInputProcessor],
}
const enhanced = withSupermemory(config, TEST_CONFIG.containerTag)
const enhanced = withSupermemory(config, {
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
})
expect(enhanced.inputProcessors).toHaveLength(2)
expect(enhanced.inputProcessors?.[0]?.id).toBe("supermemory-input")
@ -929,7 +975,10 @@ describe("withSupermemory", () => {
outputProcessors: [existingOutputProcessor],
}
const enhanced = withSupermemory(config, TEST_CONFIG.containerTag)
const enhanced = withSupermemory(config, {
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
})
expect(enhanced.outputProcessors).toHaveLength(2)
expect(enhanced.outputProcessors?.[0]?.id).toBe("existing-output")
@ -946,7 +995,10 @@ describe("withSupermemory", () => {
outputProcessors: [existingOutput],
}
const enhanced = withSupermemory(config, TEST_CONFIG.containerTag)
const enhanced = withSupermemory(config, {
containerTag: TEST_CONFIG.containerTag,
customId: TEST_CONFIG.customId,
})
expect(enhanced.inputProcessors).toHaveLength(2)
expect(enhanced.outputProcessors).toHaveLength(2)
@ -960,10 +1012,11 @@ describe("withSupermemory", () => {
describe("options passthrough", () => {
it("should pass options to processors", () => {
const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" }
const enhanced = withSupermemory(config, TEST_CONFIG.containerTag, {
const enhanced = withSupermemory(config, {
containerTag: TEST_CONFIG.containerTag,
customId: "conv-123",
mode: "full",
addMemory: "always",
threadId: "conv-123",
verbose: true,
})