updated typescript and python sdk (#878)

This commit is contained in:
sreedharsreeram 2026-04-25 01:43:13 +00:00
parent d7ee078ae9
commit 589edd3437
6 changed files with 88 additions and 92 deletions

View file

@ -44,9 +44,11 @@ import { withSupermemory } from "@supermemory/tools/openai"
const openai = new OpenAI()
// Wrap client with memory - memories auto-injected into system prompts
const client = withSupermemory(openai, "user-123", {
mode: "full", // "profile" | "query" | "full"
addMemory: "always", // "always" | "never"
const client = withSupermemory(openai, {
containerTag: "user-123", // Required: identifies the user/container
customId: "conversation-456", // Required: groups messages into the same document
mode: "full", // "profile" | "query" | "full"
addMemory: "always", // "always" (default) | "never"
})
// Use normally - memories are automatically included
@ -62,16 +64,19 @@ const response = await client.chat.completions.create({
### Configuration Options
```typescript
const client = withSupermemory(openai, "user-123", {
const client = withSupermemory(openai, {
// Required: identifies the user/container
containerTag: "user-123",
// Required: Group messages into the same document
customId: "conv-456",
// Memory search mode
mode: "full", // "profile" (user profile only), "query" (search only), "full" (both)
// Auto-save conversations as memories
// Auto-save conversations as memories (default: "always")
addMemory: "always", // "always" | "never"
// Group messages into conversations
conversationId: "conv-456",
// Enable debug logging
verbose: true,
@ -91,7 +96,7 @@ const client = withSupermemory(openai, "user-123", {
### Works with Responses API Too
```typescript
const client = withSupermemory(openai, "user-123", { mode: "full" })
const client = withSupermemory(openai, { containerTag: "user-123", customId: "conv-456", mode: "full" })
// Memories injected into instructions
const response = await client.responses.create({

View file

@ -268,10 +268,11 @@ The `withSupermemory` function creates an OpenAI client with SuperMemory middlew
import { withSupermemory } from "@supermemory/tools/openai"
// Create OpenAI client with supermemory middleware
const openaiWithSupermemory = withSupermemory("user-123", {
conversationId: "conversation-456",
const openaiWithSupermemory = withSupermemory(openai, {
containerTag: "user-123", // Required: identifies the user/container
customId: "conversation-456", // Required: groups messages into the same document
mode: "full",
addMemory: "always",
addMemory: "always", // Default: "always"
verbose: true,
})
@ -291,37 +292,12 @@ console.log(completion.choices[0]?.message?.content)
The middleware supports the same configuration options as the AI SDK version:
```typescript
const openaiWithSupermemory = withSupermemory("user-123", {
conversationId: "conversation-456", // Group messages for contextual memory
mode: "full", // "profile" | "query" | "full"
addMemory: "always", // "always" | "never"
verbose: true, // Enable detailed logging
})
```
#### Advanced Usage with Custom OpenAI Options
You can also pass custom OpenAI client options:
```typescript
import { withSupermemory } from "@supermemory/tools/openai"
const openaiWithSupermemory = withSupermemory(
"user-123",
{
mode: "profile",
addMemory: "always",
},
{
baseURL: "https://api.openai.com/v1",
organization: "org-123",
},
"custom-api-key" // Optional: custom API key
)
const completion = await openaiWithSupermemory.chat.completions.create({
model: "gpt-4o-mini",
messages: [{ role: "user", content: "Tell me about my preferences" }],
const openaiWithSupermemory = withSupermemory(openai, {
containerTag: "user-123", // Required: identifies the user/container
customId: "conversation-456", // Required: groups messages for contextual memory
mode: "full", // "profile" | "query" | "full"
addMemory: "always", // "always" (default) | "never"
verbose: true, // Enable detailed logging
})
```
@ -340,8 +316,9 @@ export async function POST(req: Request) {
conversationId: string
}
const openaiWithSupermemory = withSupermemory("user-123", {
conversationId,
const openaiWithSupermemory = withSupermemory(openai, {
containerTag: "user-123",
customId: conversationId,
mode: "full",
addMemory: "always",
verbose: true,
@ -670,11 +647,11 @@ The `withSupermemory` middleware accepts a configuration object as the second ar
```typescript
interface WithSupermemoryOptions {
containerTag: string
customId: string
containerTag: string // Required: identifies the user/container
customId: string // Required: groups messages into the same document
verbose?: boolean
mode?: "profile" | "query" | "full"
addMemory?: "always" | "never"
addMemory?: "always" | "never" // Default: "always"
/** Optional Supermemory API key. Use this in browser environments. */
apiKey?: string
baseUrl?: string
@ -687,7 +664,7 @@ interface WithSupermemoryOptions {
- **customId**: Required. Custom ID to group messages into a single document for contextual memory generation
- **verbose**: Enable detailed logging of memory search and injection process (default: false)
- **mode**: Memory search mode - "profile" (default), "query", or "full"
- **addMemory**: Automatic memory storage mode - "always" or "never" (default: "never")
- **addMemory**: Automatic memory storage mode - "always" (default) or "never"
- **skipMemoryOnError**: If memory retrieval fails or hits the internal timeout, continue with the original prompt (default: true)
## Available Tools

View file

@ -15,12 +15,12 @@ import {
* the instructions parameter (appends to existing or creates new instructions).
*
* @param openaiClient - The OpenAI client to wrap with SuperMemory middleware
* @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID)
* @param options - Optional configuration options for the middleware
* @param options.conversationId - Optional conversation ID to group messages into a single document for contextual memory generation
* @param options - Configuration options for the middleware
* @param options.containerTag - Required. The container tag/identifier for memory search (e.g., user ID, project ID)
* @param options.customId - Required. Custom ID to group messages into a single document for contextual memory generation
* @param options.verbose - Optional flag to enable detailed logging of memory search and injection process (default: false)
* @param options.mode - Optional mode for memory search: "profile" (default), "query", or "full"
* @param options.addMemory - Optional mode for memory addition: "always", "never" (default)
* @param options.addMemory - Optional mode for memory addition: "always" (default), "never"
*
* @returns An OpenAI client with SuperMemory middleware injected for both Chat Completions and Responses APIs
*
@ -33,8 +33,9 @@ import {
* const openai = new OpenAI({
* apiKey: process.env.OPENAI_API_KEY,
* })
* const openaiWithSupermemory = withSupermemory(openai, "user-123", {
* conversationId: "conversation-456",
* const openaiWithSupermemory = withSupermemory(openai, {
* containerTag: "user-123",
* customId: "conversation-456",
* mode: "full",
* addMemory: "always"
* })
@ -60,28 +61,37 @@ import {
*/
export function withSupermemory(
openaiClient: OpenAI,
containerTag: string,
options?: OpenAIMiddlewareOptions,
options: OpenAIMiddlewareOptions,
) {
if (!process.env.SUPERMEMORY_API_KEY) {
throw new Error("SUPERMEMORY_API_KEY is not set")
}
const conversationId = options?.conversationId
const verbose = options?.verbose ?? false
const mode = options?.mode ?? "profile"
const addMemory = options?.addMemory ?? "never"
const baseUrl = options?.baseUrl
if (!options.containerTag) {
throw new Error(
"containerTag is required — provide a non-empty string to identify the user/container",
)
}
if (!options.customId) {
throw new Error(
"customId is required — provide a non-empty string to group messages into a single document",
)
}
const { containerTag } = options
const verbose = options.verbose ?? false
const mode = options.mode ?? "profile"
const addMemory = options.addMemory ?? "always"
const openaiWithSupermemory = createOpenAIMiddleware(
openaiClient,
containerTag,
{
conversationId,
...options,
verbose,
mode,
addMemory,
baseUrl,
},
)

View file

@ -12,7 +12,10 @@ const normalizeBaseUrl = (url?: string): string => {
}
export interface OpenAIMiddlewareOptions {
conversationId?: string
/** Container tag/identifier for memory search (e.g., user ID, project ID). Required. */
containerTag: string
/** Custom ID to group messages into a single document. Required. */
customId: string
verbose?: boolean
mode?: "profile" | "query" | "full"
addMemory?: "always" | "never"
@ -338,11 +341,13 @@ const addMemoryTool = async (
text: (c as { type: "text"; text: string }).text,
}))
: "",
...((msg as any).name && { name: (msg as any).name }),
...((msg as any).tool_calls && { tool_calls: (msg as any).tool_calls }),
...((msg as any).tool_call_id && {
tool_call_id: (msg as any).tool_call_id,
}),
...("name" in msg && msg.name && { name: msg.name }),
...("tool_calls" in msg &&
msg.tool_calls && { tool_calls: msg.tool_calls }),
...("tool_call_id" in msg &&
msg.tool_call_id && {
tool_call_id: msg.tool_call_id,
}),
}))
const response = await addConversation({
@ -355,7 +360,7 @@ const addMemoryTool = async (
logger.info("Conversation saved successfully via /v4/conversations", {
containerTag,
conversationId,
customId,
messageCount: messages.length,
responseId: response.id,
})
@ -391,7 +396,7 @@ const addMemoryTool = async (
*
* @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID)
* @param options - Optional configuration options for the middleware
* @param options.conversationId - Optional conversation ID to group messages for contextual memory generation
* @param options.customId - Optional custom ID to group messages into a single document for contextual memory generation
* @param options.verbose - Enable detailed logging of memory operations (default: false)
* @param options.mode - Memory search mode: "profile" (all memories), "query" (search-based), or "full" (both) (default: "profile")
* @param options.addMemory - Automatic memory storage mode: "always" (default) or "never"
@ -401,7 +406,7 @@ const addMemoryTool = async (
* @example
* ```typescript
* const openaiWithSupermemory = createOpenAIMiddleware(openai, "user-123", {
* conversationId: "conversation-456",
* customId: "conversation-456",
* mode: "full",
* addMemory: "always",
* verbose: true
@ -421,9 +426,9 @@ export function createOpenAIMiddleware(
...(baseUrl !== "https://api.supermemory.ai" ? { baseURL: baseUrl } : {}),
})
const conversationId = options?.conversationId
const customId = options?.customId
const mode = options?.mode ?? "profile"
const addMemory = options?.addMemory ?? "never"
const addMemory = options?.addMemory ?? "always"
const originalCreate = openaiClient.chat.completions.create
const originalResponsesCreate = openaiClient.responses?.create
@ -534,20 +539,18 @@ export function createOpenAIMiddleware(
logger.info("Starting memory search for Responses API", {
containerTag,
conversationId,
customId,
mode,
})
const operations: Promise<any>[] = []
const operations: Promise<unknown>[] = []
if (addMemory === "always" && input?.trim()) {
const content = conversationId ? `Input: ${input}` : input
const customId = conversationId
? `conversation:${conversationId}`
: undefined
const content = customId ? `Input: ${input}` : input
const memoryCustomId = customId ? `conversation:${customId}` : undefined
operations.push(
addMemoryTool(client, containerTag, content, customId, logger),
addMemoryTool(client, containerTag, content, memoryCustomId, logger),
)
}
@ -590,28 +593,26 @@ export function createOpenAIMiddleware(
logger.info("Starting memory search", {
containerTag,
conversationId,
customId,
mode,
})
const operations: Promise<any>[] = []
const operations: Promise<unknown>[] = []
if (addMemory === "always") {
const userMessage = getLastUserMessage(messages)
if (userMessage?.trim()) {
const content = conversationId
const content = customId
? getConversationContent(messages)
: userMessage
const customId = conversationId
? `conversation:${conversationId}`
: undefined
const memoryCustomId = customId ? `conversation:${customId}` : undefined
operations.push(
addMemoryTool(
client,
containerTag,
content,
customId,
memoryCustomId,
logger,
messages,
process.env.SUPERMEMORY_API_KEY,

View file

@ -13,8 +13,9 @@ export async function POST(req: Request) {
apiKey: process.env.OPENAI_API_KEY,
})
const openaiWithSupermemory = withSupermemory(openai, "user-123", {
conversationId,
const openaiWithSupermemory = withSupermemory(openai, {
containerTag: "user-123",
customId: conversationId,
mode: "full",
addMemory: "always",
verbose: true,

View file

@ -5,7 +5,9 @@ const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
})
const openaiWithSupermemory = withSupermemory(openai, "user_id_life", {
const openaiWithSupermemory = withSupermemory(openai, {
containerTag: "user_id_life",
customId: "test-conversation",
verbose: true,
mode: "full",
addMemory: "always",