chore: skipMemoryOnError for withSupermemory (#871)

This commit is contained in:
MaheshtheDev 2026-04-21 16:43:23 +00:00
parent 6b0105d340
commit 20c5a18e13
3 changed files with 75 additions and 12 deletions

View file

@ -106,6 +106,18 @@ const model = withSupermemory(openai("gpt-4"), "user-123", {
// Console output shows memory retrieval details
```
### When Supermemory errors (optional: continue without memories)
If the Supermemory API returns an error (or is unreachable), memory retrieval fails before the LLM runs. By default that error **propagates** (fails the call).
To continue the LLM request **without** injected memories instead, opt in with `skipMemoryOnError: true`. Use `verbose: true` if you want console output when that happens.
```typescript
const model = withSupermemory(openai("gpt-5"), "user-123", {
skipMemoryOnError: true
})
```
---
## Memory Tools

View file

@ -1,7 +1,7 @@
{
"name": "@supermemory/tools",
"type": "module",
"version": "1.4.4",
"version": "1.4.5",
"description": "Memory tools for AI SDK and OpenAI function calling with supermemory",
"scripts": {
"build": "tsdown",

View file

@ -50,6 +50,12 @@ interface WrapVercelLanguageModelOptions {
* ```
*/
promptTemplate?: PromptTemplate
/**
* When Supermemory memory retrieval / injection fails:
* - `false` (default): propagate the error.
* - `true`: log and call the base model with the original prompt (no memories).
*/
skipMemoryOnError?: boolean
}
/**
@ -72,6 +78,7 @@ interface WrapVercelLanguageModelOptions {
 * @param options.addMemory - Optional mode for adding memories: "always", "never" (default: "never")
* @param options.apiKey - Optional Supermemory API key to use instead of the environment variable
* @param options.baseUrl - Optional base URL for the Supermemory API (default: "https://api.supermemory.ai")
* @param options.skipMemoryOnError - When memory retrieval fails: `false` (default) throws; `true` continues without injected memories
*
* @returns A wrapped language model that automatically includes relevant memories in prompts
*
@ -93,7 +100,7 @@ interface WrapVercelLanguageModelOptions {
* ```
*
* @throws {Error} When neither `options.apiKey` nor `process.env.SUPERMEMORY_API_KEY` are set
* @throws {Error} When supermemory API request fails
* @throws {Error} When supermemory memory retrieval fails unless `skipMemoryOnError` is `true`
*/
const wrapVercelLanguageModel = <T extends LanguageModel>(
model: T,
@ -119,19 +126,42 @@ const wrapVercelLanguageModel = <T extends LanguageModel>(
promptTemplate: options?.promptTemplate,
})
const skipMemoryOnError = options?.skipMemoryOnError ?? false
// Proxy keeps prototype/getter fields (e.g. provider, modelId) that `{ ...model }` drops.
return new Proxy(model, {
get(target, prop, receiver) {
if (prop === "doGenerate") {
return async (params: LanguageModelCallOptions) => {
let modelParams: LanguageModelCallOptions = params
try {
const transformedParams = await transformParamsWithMemory(
params,
ctx,
)
modelParams = await transformParamsWithMemory(params, ctx)
} catch (memoryError) {
if (skipMemoryOnError) {
ctx.logger.warn(
"Supermemory retrieval failed; continuing without injected memories",
{
error:
memoryError instanceof Error
? memoryError.message
: "Unknown error",
},
)
modelParams = params
} else {
ctx.logger.error("Error during memory retrieval for generation", {
error:
memoryError instanceof Error
? memoryError.message
: "Unknown error",
})
throw memoryError
}
}
try {
// biome-ignore lint/suspicious/noExplicitAny: Union type compatibility between V2 and V3
const result = await target.doGenerate(transformedParams as any)
const result = await target.doGenerate(modelParams as any)
const userMessage = getLastUserMessage(params)
if (
@ -168,15 +198,36 @@ const wrapVercelLanguageModel = <T extends LanguageModel>(
return async (params: LanguageModelCallOptions) => {
let generatedText = ""
let modelParams: LanguageModelCallOptions = params
try {
const transformedParams = await transformParamsWithMemory(
params,
ctx,
)
modelParams = await transformParamsWithMemory(params, ctx)
} catch (memoryError) {
if (skipMemoryOnError) {
ctx.logger.warn(
"Supermemory retrieval failed; continuing without injected memories",
{
error:
memoryError instanceof Error
? memoryError.message
: "Unknown error",
},
)
modelParams = params
} else {
ctx.logger.error("Error during memory retrieval for stream", {
error:
memoryError instanceof Error
? memoryError.message
: "Unknown error",
})
throw memoryError
}
}
try {
const { stream, ...rest } = await target.doStream(
// biome-ignore lint/suspicious/noExplicitAny: Union type compatibility between V2 and V3
transformedParams as any,
modelParams as any,
)
const transformStream = new TransformStream<