mirror of
https://github.com/supermemoryai/supermemory.git
synced 2026-04-28 03:29:59 +00:00
feat: new tools package (#407)
Some checks failed
Publish AI SDK / publish (push) Has been cancelled
Some checks failed
Publish AI SDK / publish (push) Has been cancelled
This commit is contained in:
parent
33277b492c
commit
cae7051d1a
28 changed files with 963 additions and 2617 deletions
28
biome.json
28
biome.json
|
|
@ -13,22 +13,22 @@
|
|||
"files": {
|
||||
"includes": [
|
||||
"**",
|
||||
"!**/node_modules",
|
||||
"!**/node_modules/",
|
||||
"!**/.next/",
|
||||
"!**/.contentlayer",
|
||||
"!**/.vercel",
|
||||
"!**/.react-router",
|
||||
"!**/.wrangler",
|
||||
"!**/.contentlayer/",
|
||||
"!**/.vercel/",
|
||||
"!**/.react-router/",
|
||||
"!**/.wrangler/",
|
||||
"!**/package.json",
|
||||
"!**/worker-configuration.d.ts",
|
||||
"!**/.turbo/",
|
||||
"!**/.vercel",
|
||||
"!**/dist",
|
||||
"!**/.astro",
|
||||
"!**/build",
|
||||
"!**/.alchemy",
|
||||
"!**/.build",
|
||||
"!**/.open-next",
|
||||
"!**/.vercel/",
|
||||
"!**/dist/",
|
||||
"!**/.astro/",
|
||||
"!**/build/",
|
||||
"!**/.alchemy/",
|
||||
"!**/.build/",
|
||||
"!**/.open-next/",
|
||||
"!**/*.astro"
|
||||
]
|
||||
},
|
||||
|
|
@ -39,7 +39,7 @@
|
|||
"javascript": {
|
||||
"formatter": {
|
||||
"quoteStyle": "double",
|
||||
"semicolons": "always"
|
||||
"semicolons": "asNeeded"
|
||||
}
|
||||
},
|
||||
"linter": {
|
||||
|
|
@ -58,7 +58,7 @@
|
|||
"ignoreRestSiblings": true
|
||||
}
|
||||
},
|
||||
"useExhaustiveDependencies": "off",
|
||||
"useExhaustiveDependencies": "warn",
|
||||
"noUnusedImports": "warn"
|
||||
},
|
||||
"recommended": true,
|
||||
|
|
|
|||
27
bun.lock
27
bun.lock
|
|
@ -154,7 +154,7 @@
|
|||
},
|
||||
"packages/ai-sdk": {
|
||||
"name": "@supermemory/ai-sdk",
|
||||
"version": "1.0.7",
|
||||
"version": "1.0.8",
|
||||
"dependencies": {
|
||||
"@ai-sdk/openai": "^2.0.22",
|
||||
"@ai-sdk/provider": "^2.0.0",
|
||||
|
|
@ -184,10 +184,13 @@
|
|||
"ai-gateway-provider": "^0.0.11",
|
||||
},
|
||||
},
|
||||
"packages/openai-sdk-ts": {
|
||||
"name": "@supermemory/openai-sdk",
|
||||
"packages/tools": {
|
||||
"name": "@supermemory/tools",
|
||||
"version": "1.0.0",
|
||||
"dependencies": {
|
||||
"@ai-sdk/openai": "^2.0.22",
|
||||
"@ai-sdk/provider": "^2.0.0",
|
||||
"ai": "^5.0.26",
|
||||
"openai": "^4.104.0",
|
||||
"supermemory": "^3.0.0-alpha.26",
|
||||
"zod": "^4.1.4",
|
||||
|
|
@ -1353,7 +1356,7 @@
|
|||
|
||||
"@supermemory/ai-sdk": ["@supermemory/ai-sdk@workspace:packages/ai-sdk"],
|
||||
|
||||
"@supermemory/openai-sdk": ["@supermemory/openai-sdk@workspace:packages/openai-sdk-ts"],
|
||||
"@supermemory/tools": ["@supermemory/tools@workspace:packages/tools"],
|
||||
|
||||
"@swc/core": ["@swc/core@1.13.5", "", { "dependencies": { "@swc/counter": "^0.1.3", "@swc/types": "^0.1.24" }, "optionalDependencies": { "@swc/core-darwin-arm64": "1.13.5", "@swc/core-darwin-x64": "1.13.5", "@swc/core-linux-arm-gnueabihf": "1.13.5", "@swc/core-linux-arm64-gnu": "1.13.5", "@swc/core-linux-arm64-musl": "1.13.5", "@swc/core-linux-x64-gnu": "1.13.5", "@swc/core-linux-x64-musl": "1.13.5", "@swc/core-win32-arm64-msvc": "1.13.5", "@swc/core-win32-ia32-msvc": "1.13.5", "@swc/core-win32-x64-msvc": "1.13.5" }, "peerDependencies": { "@swc/helpers": ">=0.5.17" }, "optionalPeers": ["@swc/helpers"] }, "sha512-WezcBo8a0Dg2rnR82zhwoR6aRNxeTGfK5QCD6TQ+kg3xx/zNT02s/0o+81h/3zhvFSB24NtqEr8FTw88O5W/JQ=="],
|
||||
|
||||
|
|
@ -4105,6 +4108,8 @@
|
|||
|
||||
"@repo/web/ai": ["ai@5.0.0-beta.24", "", { "dependencies": { "@ai-sdk/gateway": "1.0.0-beta.10", "@ai-sdk/provider": "2.0.0-beta.1", "@ai-sdk/provider-utils": "3.0.0-beta.5", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4" }, "bin": { "ai": "dist/bin/ai.min.js" } }, "sha512-glQIA+PGEP+UEPB+thdqNZi9Ot4Yjiqsl071S1KPaRTGHmBIg/c8OYb2mXCRM+3cNCFGVnCTudZoYUVNwBpFxg=="],
|
||||
|
||||
"@repo/web/typescript": ["typescript@5.9.2", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-CWBzXQrc/qOkhidw1OzBTQuYRbfyxDXJMVJ1XNwUHGROVmuaeiEm3OslpZ1RV96d7SKKjZKrSJu3+t/xlw3R9A=="],
|
||||
|
||||
"@repo/web/wrangler": ["wrangler@4.33.1", "", { "dependencies": { "@cloudflare/kv-asset-handler": "0.4.0", "@cloudflare/unenv-preset": "2.7.0", "blake3-wasm": "2.1.5", "esbuild": "0.25.4", "miniflare": "4.20250823.1", "path-to-regexp": "6.3.0", "unenv": "2.0.0-rc.19", "workerd": "1.20250823.0" }, "optionalDependencies": { "fsevents": "~2.3.2" }, "peerDependencies": { "@cloudflare/workers-types": "^4.20250823.0" }, "optionalPeers": ["@cloudflare/workers-types"], "bin": { "wrangler": "bin/wrangler.js", "wrangler2": "bin/wrangler.js" } }, "sha512-8x/3Tbt+/raBMm0+vRyAHSGu2kF1QjeiSrx47apgPk/AzSBcXI9YuUUdGrKnozMYZlEbOxdBQOMyuRRDTyNmOg=="],
|
||||
|
||||
"@scalar/openapi-types/zod": ["zod@3.24.1", "", {}, "sha512-muH7gBL9sI1nciMZV67X5fTKKBLtwpZ5VBp1vsOQzj1MhrBZ4wlVCm3gedKZWLp0Oyel8sIGfeiz54Su+OVT+A=="],
|
||||
|
|
@ -4145,9 +4150,13 @@
|
|||
|
||||
"@supermemory/ai-sdk/zod": ["zod@4.1.5", "", {}, "sha512-rcUUZqlLJgBC33IT3PNMgsCq6TzLQEG/Ei/KTCU0PedSWRMAXoOUN+4t/0H+Q8bdnLPdqUYnvboJT0bn/229qg=="],
|
||||
|
||||
"@supermemory/openai-sdk/typescript": ["typescript@5.9.2", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-CWBzXQrc/qOkhidw1OzBTQuYRbfyxDXJMVJ1XNwUHGROVmuaeiEm3OslpZ1RV96d7SKKjZKrSJu3+t/xlw3R9A=="],
|
||||
"@supermemory/tools/@ai-sdk/openai": ["@ai-sdk/openai@2.0.23", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.7" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-uOXk8HzmMUoCmD0JMX/Y1HC/ABOR/Jza2Z2rkCaJISDYz3fp5pnb6eNjcPRL48JSMzRAGp9UP5p0OpxS06IJZg=="],
|
||||
|
||||
"@supermemory/openai-sdk/zod": ["zod@4.1.5", "", {}, "sha512-rcUUZqlLJgBC33IT3PNMgsCq6TzLQEG/Ei/KTCU0PedSWRMAXoOUN+4t/0H+Q8bdnLPdqUYnvboJT0bn/229qg=="],
|
||||
"@supermemory/tools/ai": ["ai@5.0.29", "", { "dependencies": { "@ai-sdk/gateway": "1.0.15", "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.7", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-jA/d6X5hn3r/PxgZjwzDUMJiEkLBIVVD2gcbpcT/FD4MSLxm5sn6fH1y2VFXVgBEd95mNzQ8ALQubysc6E8Y9g=="],
|
||||
|
||||
"@supermemory/tools/typescript": ["typescript@5.9.2", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-CWBzXQrc/qOkhidw1OzBTQuYRbfyxDXJMVJ1XNwUHGROVmuaeiEm3OslpZ1RV96d7SKKjZKrSJu3+t/xlw3R9A=="],
|
||||
|
||||
"@supermemory/tools/zod": ["zod@4.1.5", "", {}, "sha512-rcUUZqlLJgBC33IT3PNMgsCq6TzLQEG/Ei/KTCU0PedSWRMAXoOUN+4t/0H+Q8bdnLPdqUYnvboJT0bn/229qg=="],
|
||||
|
||||
"@tailwindcss/oxide-wasm32-wasi/@emnapi/core": ["@emnapi/core@1.5.0", "", { "dependencies": { "@emnapi/wasi-threads": "1.1.0", "tslib": "^2.4.0" }, "bundled": true }, "sha512-sbP8GzB1WDzacS8fgNPpHlp6C9VZe+SJP3F90W9rLemaQj2PzIuTEl1qDOYQf58YIpyjViI24y9aPWCjEzY2cg=="],
|
||||
|
||||
|
|
@ -5233,6 +5242,12 @@
|
|||
|
||||
"@supermemory/ai-sdk/ai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.7", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.5" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-o3BS5/t8KnBL3ubP8k3w77AByOypLm+pkIL/DCw0qKkhDbvhCy+L3hRTGPikpdb8WHcylAeKsjgwOxhj4cqTUA=="],
|
||||
|
||||
"@supermemory/tools/@ai-sdk/openai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.7", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.5" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-o3BS5/t8KnBL3ubP8k3w77AByOypLm+pkIL/DCw0qKkhDbvhCy+L3hRTGPikpdb8WHcylAeKsjgwOxhj4cqTUA=="],
|
||||
|
||||
"@supermemory/tools/ai/@ai-sdk/gateway": ["@ai-sdk/gateway@1.0.15", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.7" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-xySXoQ29+KbGuGfmDnABx+O6vc7Gj7qugmj1kGpn0rW0rQNn6UKUuvscKMzWyv1Uv05GyC1vqHq8ZhEOLfXscQ=="],
|
||||
|
||||
"@supermemory/tools/ai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.7", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.5" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-o3BS5/t8KnBL3ubP8k3w77AByOypLm+pkIL/DCw0qKkhDbvhCy+L3hRTGPikpdb8WHcylAeKsjgwOxhj4cqTUA=="],
|
||||
|
||||
"alchemy/@cloudflare/unenv-preset/unenv": ["unenv@2.0.0-rc.19", "", { "dependencies": { "defu": "^6.1.4", "exsolve": "^1.0.7", "ohash": "^2.0.11", "pathe": "^2.0.3", "ufo": "^1.6.1" } }, "sha512-t/OMHBNAkknVCI7bVB9OWjUUAwhVv9vsPIAGnNUxnu3FxPQN11rjh0sksLMzc3g7IlTgvHmOTl4JM7JHpcv5wA=="],
|
||||
|
||||
"alchemy/@cloudflare/unenv-preset/workerd": ["workerd@1.20250823.0", "", { "optionalDependencies": { "@cloudflare/workerd-darwin-64": "1.20250823.0", "@cloudflare/workerd-darwin-arm64": "1.20250823.0", "@cloudflare/workerd-linux-64": "1.20250823.0", "@cloudflare/workerd-linux-arm64": "1.20250823.0", "@cloudflare/workerd-windows-64": "1.20250823.0" }, "bin": { "workerd": "bin/workerd" } }, "sha512-95lToK9zeaC7bX5ZmlP/wz6zqoUPBk3hhec1JjEMGZrxsXY9cPRkjWNCcjDctQ17U97vjMcY/ymchgx7w8Cfmg=="],
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
{
|
||||
"name": "@supermemory/ai-sdk",
|
||||
"type": "module",
|
||||
"version": "1.0.7",
|
||||
"version": "1.0.8",
|
||||
"scripts": {
|
||||
"build": "tsdown",
|
||||
"dev": "tsdown --watch",
|
||||
|
|
|
|||
|
|
@ -1,2 +1 @@
|
|||
export * from "./infinite-chat"
|
||||
export * from "./tools"
|
||||
|
|
|
|||
|
|
@ -1,365 +0,0 @@
|
|||
import { generateText } from "ai"
|
||||
import { describe, expect, it } from "vitest"
|
||||
import z from "zod"
|
||||
import {
|
||||
createSupermemoryInfiniteChat,
|
||||
type SupermemoryInfiniteChatConfig,
|
||||
} from "./infinite-chat"
|
||||
|
||||
import "dotenv/config"
|
||||
|
||||
const providers = z.enum([
|
||||
"openai",
|
||||
"anthropic",
|
||||
"openrouter",
|
||||
"deepinfra",
|
||||
"groq",
|
||||
"google",
|
||||
"cloudflare",
|
||||
] satisfies SupermemoryInfiniteChatConfig["providerName"][])
|
||||
|
||||
describe("createSupermemoryInfiniteChat", () => {
|
||||
// Required API keys - tests will fail if not provided
|
||||
const testApiKey = process.env.SUPERMEMORY_API_KEY
|
||||
const testProviderApiKey = process.env.PROVIDER_API_KEY
|
||||
|
||||
if (!testApiKey) {
|
||||
throw new Error(
|
||||
"SUPERMEMORY_API_KEY environment variable is required for tests",
|
||||
)
|
||||
}
|
||||
if (!testProviderApiKey) {
|
||||
throw new Error(
|
||||
"PROVIDER_API_KEY environment variable is required for tests",
|
||||
)
|
||||
}
|
||||
|
||||
// Optional configuration with defaults
|
||||
const testProviderName = providers.parse(
|
||||
process.env.PROVIDER_NAME ?? "openai",
|
||||
)
|
||||
const testProviderUrl = process.env.PROVIDER_URL
|
||||
const testModelName = process.env.MODEL_NAME || "gpt-5-mini"
|
||||
const testHeaders = { "custom-header": "test-value" }
|
||||
|
||||
// Validate provider configuration - either name OR URL, not both
|
||||
if (testProviderUrl && process.env.PROVIDER_NAME) {
|
||||
throw new Error(
|
||||
"Cannot specify both PROVIDER_NAME and PROVIDER_URL - use one or the other",
|
||||
)
|
||||
}
|
||||
|
||||
// Test prompts and inputs
|
||||
const testPrompts = [
|
||||
"Hello, how are you?",
|
||||
"What is 2 + 2?",
|
||||
"Write a short poem about AI",
|
||||
"Explain quantum computing in simple terms",
|
||||
"What can you help me with today?",
|
||||
]
|
||||
|
||||
const testMessages = [
|
||||
[{ role: "user" as const, content: "Hello!" }],
|
||||
[
|
||||
{ role: "system" as const, content: "You are a helpful assistant." },
|
||||
{ role: "user" as const, content: "What is AI?" },
|
||||
],
|
||||
[
|
||||
{ role: "user" as const, content: "Tell me a joke" },
|
||||
{
|
||||
role: "assistant" as const,
|
||||
content:
|
||||
"Why don't scientists trust atoms? Because they make up everything!",
|
||||
},
|
||||
{ role: "user" as const, content: "Tell me another one" },
|
||||
],
|
||||
]
|
||||
|
||||
describe("client creation", () => {
|
||||
it("should create client with configured provider", () => {
|
||||
const config: SupermemoryInfiniteChatConfig = testProviderUrl
|
||||
? {
|
||||
providerUrl: testProviderUrl,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: testHeaders,
|
||||
}
|
||||
: {
|
||||
providerName: testProviderName,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: testHeaders,
|
||||
}
|
||||
|
||||
const client = createSupermemoryInfiniteChat(testApiKey, config)
|
||||
|
||||
expect(client).toBeDefined()
|
||||
expect(typeof client).toBe("function")
|
||||
})
|
||||
|
||||
it("should create client with openai provider configuration", () => {
|
||||
const config: SupermemoryInfiniteChatConfig = {
|
||||
providerName: "openai",
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: testHeaders,
|
||||
}
|
||||
|
||||
const client = createSupermemoryInfiniteChat(testApiKey, config)
|
||||
|
||||
expect(client).toBeDefined()
|
||||
expect(typeof client).toBe("function")
|
||||
})
|
||||
|
||||
it("should create client with anthropic provider configuration", () => {
|
||||
const config: SupermemoryInfiniteChatConfig = {
|
||||
providerName: "anthropic",
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: testHeaders,
|
||||
}
|
||||
|
||||
const client = createSupermemoryInfiniteChat(testApiKey, config)
|
||||
|
||||
expect(client).toBeDefined()
|
||||
expect(typeof client).toBe("function")
|
||||
})
|
||||
|
||||
it("should create client with custom provider URL", () => {
|
||||
const customUrl = "https://custom-provider.com/v1/chat"
|
||||
const config: SupermemoryInfiniteChatConfig = {
|
||||
providerUrl: customUrl,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: testHeaders,
|
||||
}
|
||||
|
||||
const client = createSupermemoryInfiniteChat(testApiKey, config)
|
||||
|
||||
expect(client).toBeDefined()
|
||||
expect(typeof client).toBe("function")
|
||||
})
|
||||
})
|
||||
|
||||
describe("AI SDK integration", () => {
|
||||
it("should generate text with simple prompt", async () => {
|
||||
const config: SupermemoryInfiniteChatConfig = testProviderUrl
|
||||
? {
|
||||
providerUrl: testProviderUrl,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: {},
|
||||
}
|
||||
: {
|
||||
providerName: testProviderName,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: {},
|
||||
}
|
||||
|
||||
const client = createSupermemoryInfiniteChat(testApiKey, config)
|
||||
|
||||
console.log(client(testModelName))
|
||||
|
||||
const result = await generateText({
|
||||
model: client(testModelName),
|
||||
prompt: testPrompts[0], // "Hello, how are you?"
|
||||
})
|
||||
|
||||
expect(result).toBeDefined()
|
||||
expect(result.text).toBeDefined()
|
||||
expect(typeof result.text).toBe("string")
|
||||
expect(result.text.length).toBeGreaterThan(0)
|
||||
})
|
||||
|
||||
it("should generate text with messages array", async () => {
|
||||
const config: SupermemoryInfiniteChatConfig = testProviderUrl
|
||||
? {
|
||||
providerUrl: testProviderUrl,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: {},
|
||||
}
|
||||
: {
|
||||
providerName: testProviderName,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: {},
|
||||
}
|
||||
|
||||
const client = createSupermemoryInfiniteChat(testApiKey, config)
|
||||
|
||||
const result = await generateText({
|
||||
model: client(testModelName),
|
||||
messages: testMessages[1], // System + user messages
|
||||
})
|
||||
|
||||
expect(result).toBeDefined()
|
||||
expect(result.text).toBeDefined()
|
||||
expect(typeof result.text).toBe("string")
|
||||
expect(result.text.length).toBeGreaterThan(0)
|
||||
})
|
||||
|
||||
it("should handle conversation history", async () => {
|
||||
const config: SupermemoryInfiniteChatConfig = testProviderUrl
|
||||
? {
|
||||
providerUrl: testProviderUrl,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: {},
|
||||
}
|
||||
: {
|
||||
providerName: testProviderName,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: {},
|
||||
}
|
||||
|
||||
const client = createSupermemoryInfiniteChat(testApiKey, config)
|
||||
|
||||
const result = await generateText({
|
||||
model: client(testModelName),
|
||||
messages: testMessages[2], // Multi-turn conversation
|
||||
})
|
||||
|
||||
expect(result).toBeDefined()
|
||||
expect(result.text).toBeDefined()
|
||||
expect(typeof result.text).toBe("string")
|
||||
expect(result.text.length).toBeGreaterThan(0)
|
||||
})
|
||||
|
||||
it("should work with different prompt variations", async () => {
|
||||
const config: SupermemoryInfiniteChatConfig = testProviderUrl
|
||||
? {
|
||||
providerUrl: testProviderUrl,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: {},
|
||||
}
|
||||
: {
|
||||
providerName: testProviderName,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: {},
|
||||
}
|
||||
|
||||
const client = createSupermemoryInfiniteChat(testApiKey, config)
|
||||
|
||||
// Test multiple prompts
|
||||
for (const prompt of testPrompts.slice(0, 3)) {
|
||||
const result = await generateText({
|
||||
model: client(testModelName),
|
||||
prompt,
|
||||
})
|
||||
|
||||
expect(result).toBeDefined()
|
||||
expect(result.text).toBeDefined()
|
||||
expect(typeof result.text).toBe("string")
|
||||
expect(result.text.length).toBeGreaterThan(0)
|
||||
}
|
||||
})
|
||||
|
||||
it("should work with configured and alternate models", async () => {
|
||||
const config: SupermemoryInfiniteChatConfig = testProviderUrl
|
||||
? {
|
||||
providerUrl: testProviderUrl,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: {},
|
||||
}
|
||||
: {
|
||||
providerName: testProviderName,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: {},
|
||||
}
|
||||
|
||||
const client = createSupermemoryInfiniteChat(testApiKey, config)
|
||||
|
||||
const modelsToTest = [testModelName]
|
||||
// Add alternate model for OpenAI
|
||||
if (testProviderName === "openai" && !testProviderUrl) {
|
||||
modelsToTest.push("gpt-4o-mini")
|
||||
}
|
||||
|
||||
for (const modelName of modelsToTest) {
|
||||
const result = await generateText({
|
||||
model: client(modelName),
|
||||
prompt: "Say hello in one word",
|
||||
})
|
||||
|
||||
expect(result).toBeDefined()
|
||||
expect(result.text).toBeDefined()
|
||||
expect(typeof result.text).toBe("string")
|
||||
}
|
||||
})
|
||||
|
||||
it("should work with custom headers", async () => {
|
||||
const config: SupermemoryInfiniteChatConfig = testProviderUrl
|
||||
? {
|
||||
providerUrl: testProviderUrl,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: {
|
||||
"x-custom-header": "test-value",
|
||||
},
|
||||
}
|
||||
: {
|
||||
providerName: testProviderName,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: {
|
||||
"x-custom-header": "test-value",
|
||||
},
|
||||
}
|
||||
|
||||
const client = createSupermemoryInfiniteChat(testApiKey, config)
|
||||
|
||||
const result = await generateText({
|
||||
model: client(testModelName),
|
||||
prompt: "Hello",
|
||||
})
|
||||
|
||||
expect(result).toBeDefined()
|
||||
expect(result.text).toBeDefined()
|
||||
expect(typeof result.text).toBe("string")
|
||||
})
|
||||
})
|
||||
|
||||
describe("configuration validation", () => {
|
||||
it("should handle empty headers object", () => {
|
||||
const config: SupermemoryInfiniteChatConfig = testProviderUrl
|
||||
? {
|
||||
providerUrl: testProviderUrl,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: {},
|
||||
}
|
||||
: {
|
||||
providerName: testProviderName,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: {},
|
||||
}
|
||||
|
||||
const client = createSupermemoryInfiniteChat(testApiKey, config)
|
||||
|
||||
expect(client).toBeDefined()
|
||||
})
|
||||
|
||||
it("should handle configuration with custom headers", () => {
|
||||
const customHeaders = {
|
||||
authorization: "Bearer custom-token",
|
||||
"x-custom": "custom-value",
|
||||
}
|
||||
const config: SupermemoryInfiniteChatConfig = testProviderUrl
|
||||
? {
|
||||
providerUrl: testProviderUrl,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: customHeaders,
|
||||
}
|
||||
: {
|
||||
providerName: testProviderName,
|
||||
providerApiKey: testProviderApiKey,
|
||||
headers: customHeaders,
|
||||
}
|
||||
|
||||
const client = createSupermemoryInfiniteChat(testApiKey, config)
|
||||
|
||||
expect(client).toBeDefined()
|
||||
})
|
||||
|
||||
it("should handle different API keys", () => {
|
||||
const config: SupermemoryInfiniteChatConfig = {
|
||||
providerName: "openai",
|
||||
providerApiKey: "different-provider-key",
|
||||
headers: {},
|
||||
}
|
||||
|
||||
const client = createSupermemoryInfiniteChat("different-sm-key", config)
|
||||
|
||||
expect(client).toBeDefined()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
|
@ -1,49 +0,0 @@
|
|||
import { createOpenAI } from "@ai-sdk/openai"
|
||||
|
||||
interface SupermemoryInfiniteChatConfigBase {
|
||||
providerApiKey: string
|
||||
headers: Record<string, string>
|
||||
}
|
||||
|
||||
interface SupermemoryInfiniteChatConfigWithProviderName
|
||||
extends SupermemoryInfiniteChatConfigBase {
|
||||
providerName: keyof typeof providerMap
|
||||
providerUrl?: never
|
||||
}
|
||||
|
||||
interface SupermemoryInfiniteChatConfigWithProviderUrl
|
||||
extends SupermemoryInfiniteChatConfigBase {
|
||||
providerUrl: string
|
||||
providerName?: never
|
||||
}
|
||||
|
||||
export type SupermemoryInfiniteChatConfig =
|
||||
| SupermemoryInfiniteChatConfigWithProviderName
|
||||
| SupermemoryInfiniteChatConfigWithProviderUrl
|
||||
|
||||
type SupermemoryApiKey = string
|
||||
|
||||
const providerMap = {
|
||||
openai: "https://api.openai.com/v1",
|
||||
anthropic: "https://api.anthropic.com/v1",
|
||||
openrouter: "https://openrouter.ai/api/v1",
|
||||
deepinfra: "https://api.deepinfra.com/v1/openai",
|
||||
groq: "https://api.groq.com/openai/v1",
|
||||
google: "https://generativelanguage.googleapis.com/v1beta/openai",
|
||||
cloudflare: "https://gateway.ai.cloudflare.com/v1/*/unlimited-context/openai",
|
||||
} as const
|
||||
|
||||
export const createSupermemoryInfiniteChat = (
|
||||
apiKey: SupermemoryApiKey,
|
||||
config?: SupermemoryInfiniteChatConfig,
|
||||
) =>
|
||||
createOpenAI({
|
||||
apiKey: config?.providerApiKey,
|
||||
baseURL: config?.providerName
|
||||
? providerMap[config.providerName]
|
||||
: config?.providerUrl,
|
||||
headers: {
|
||||
"x-supermemory-api-key": apiKey,
|
||||
...config?.headers,
|
||||
},
|
||||
}).chat
|
||||
|
|
@ -1,17 +1,8 @@
|
|||
# Supermemory OpenAI Python SDK
|
||||
|
||||
Enhanced OpenAI Python SDK with Supermemory infinite context integration.
|
||||
Memory tools for OpenAI function calling with Supermemory integration.
|
||||
|
||||
This package extends the official [OpenAI Python SDK](https://github.com/openai/openai-python) with [Supermemory](https://supermemory.ai) capabilities, enabling infinite context chat completions and memory management tools.
|
||||
|
||||
## Features
|
||||
|
||||
- 🚀 **Infinite Context**: Chat completions with unlimited conversation history
|
||||
- 🧠 **Memory Tools**: Search, add, and fetch user memories seamlessly
|
||||
- 🔌 **Multiple Providers**: Support for OpenAI, Anthropic, Groq, and more
|
||||
- 🛠 **Function Calling**: Built-in memory tools for OpenAI function calling
|
||||
- 🔒 **Type Safe**: Full TypeScript-style type hints for Python
|
||||
- ⚡ **Async Support**: Full async/await support
|
||||
This package provides memory management tools for the official [OpenAI Python SDK](https://github.com/openai/openai-python) using [Supermemory](https://supermemory.ai) capabilities.
|
||||
|
||||
## Installation
|
||||
|
||||
|
|
@ -29,58 +20,26 @@ pip install supermemory-openai
|
|||
|
||||
## Quick Start
|
||||
|
||||
### Basic Chat Completion
|
||||
### Using Memory Tools with OpenAI
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from supermemory_openai import SupermemoryOpenAI, SupermemoryInfiniteChatConfigWithProviderName
|
||||
import openai
|
||||
from supermemory_openai import SupermemoryTools, execute_memory_tool_calls
|
||||
|
||||
async def main():
|
||||
# Initialize client
|
||||
client = SupermemoryOpenAI(
|
||||
supermemory_api_key="your-supermemory-api-key",
|
||||
config=SupermemoryInfiniteChatConfigWithProviderName(
|
||||
provider_name="openai",
|
||||
provider_api_key="your-openai-api-key",
|
||||
)
|
||||
)
|
||||
|
||||
# Create chat completion
|
||||
response = await client.chat_completion(
|
||||
messages=[
|
||||
{"role": "user", "content": "Hello, how are you?"}
|
||||
],
|
||||
model="gpt-4o"
|
||||
)
|
||||
|
||||
print(response.choices[0].message.content)
|
||||
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
### Using Memory Tools
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from supermemory_openai import SupermemoryOpenAI, SupermemoryTools, SupermemoryInfiniteChatConfigWithProviderName
|
||||
|
||||
async def main():
|
||||
# Initialize client and tools
|
||||
client = SupermemoryOpenAI(
|
||||
supermemory_api_key="your-supermemory-api-key",
|
||||
config=SupermemoryInfiniteChatConfigWithProviderName(
|
||||
provider_name="openai",
|
||||
provider_api_key="your-openai-api-key",
|
||||
)
|
||||
)
|
||||
# Initialize OpenAI client
|
||||
client = openai.AsyncOpenAI(api_key="your-openai-api-key")
|
||||
|
||||
# Initialize Supermemory tools
|
||||
tools = SupermemoryTools(
|
||||
api_key="your-supermemory-api-key",
|
||||
config={"project_id": "my-project"}
|
||||
)
|
||||
|
||||
# Chat with memory tools
|
||||
response = await client.chat_completion(
|
||||
response = await client.chat.completions.create(
|
||||
model="gpt-4o",
|
||||
messages=[
|
||||
{
|
||||
"role": "system",
|
||||
|
|
@ -91,10 +50,18 @@ async def main():
|
|||
"content": "Remember that I prefer tea over coffee"
|
||||
}
|
||||
],
|
||||
tools=tools.get_tool_definitions(),
|
||||
model="gpt-4o"
|
||||
tools=tools.get_tool_definitions()
|
||||
)
|
||||
|
||||
# Handle tool calls if present
|
||||
if response.choices[0].message.tool_calls:
|
||||
tool_results = await execute_memory_tool_calls(
|
||||
api_key="your-supermemory-api-key",
|
||||
tool_calls=response.choices[0].message.tool_calls,
|
||||
config={"project_id": "my-project"}
|
||||
)
|
||||
print("Tool results:", tool_results)
|
||||
|
||||
print(response.choices[0].message.content)
|
||||
|
||||
asyncio.run(main())
|
||||
|
|
@ -102,42 +69,6 @@ asyncio.run(main())
|
|||
|
||||
## Configuration
|
||||
|
||||
### Provider Configuration
|
||||
|
||||
#### Using Provider Names
|
||||
|
||||
```python
|
||||
from supermemory_openai import SupermemoryInfiniteChatConfigWithProviderName
|
||||
|
||||
config = SupermemoryInfiniteChatConfigWithProviderName(
|
||||
provider_name="openai", # or "anthropic", "groq", "openrouter", etc.
|
||||
provider_api_key="your-provider-api-key",
|
||||
headers={"custom-header": "value"} # optional
|
||||
)
|
||||
```
|
||||
|
||||
#### Using Custom URLs
|
||||
|
||||
```python
|
||||
from supermemory_openai import SupermemoryInfiniteChatConfigWithProviderUrl
|
||||
|
||||
config = SupermemoryInfiniteChatConfigWithProviderUrl(
|
||||
provider_url="https://your-custom-endpoint.com/v1",
|
||||
provider_api_key="your-provider-api-key",
|
||||
headers={"custom-header": "value"} # optional
|
||||
)
|
||||
```
|
||||
|
||||
### Supported Providers
|
||||
|
||||
- `openai` - OpenAI API
|
||||
- `anthropic` - Anthropic Claude
|
||||
- `openrouter` - OpenRouter
|
||||
- `deepinfra` - DeepInfra
|
||||
- `groq` - Groq
|
||||
- `google` - Google AI
|
||||
- `cloudflare` - Cloudflare Workers AI
|
||||
|
||||
## Memory Tools
|
||||
|
||||
### SupermemoryTools Class
|
||||
|
|
@ -205,24 +136,6 @@ if response.choices[0].message.tool_calls:
|
|||
|
||||
## API Reference
|
||||
|
||||
### SupermemoryOpenAI
|
||||
|
||||
Enhanced OpenAI client with infinite context support.
|
||||
|
||||
#### Constructor
|
||||
|
||||
```python
|
||||
SupermemoryOpenAI(
|
||||
supermemory_api_key: str,
|
||||
config: Optional[SupermemoryInfiniteChatConfig] = None
|
||||
)
|
||||
```
|
||||
|
||||
#### Methods
|
||||
|
||||
- `chat_completion()` - Create chat completion with simplified interface
|
||||
- `create_chat_completion()` - Create chat completion with full OpenAI parameters
|
||||
|
||||
### SupermemoryTools
|
||||
|
||||
Memory management tools for function calling.
|
||||
|
|
@ -261,9 +174,7 @@ except Exception as e:
|
|||
Set these environment variables for testing:
|
||||
|
||||
- `SUPERMEMORY_API_KEY` - Your Supermemory API key
|
||||
- `PROVIDER_API_KEY` - Your AI provider API key
|
||||
- `PROVIDER_NAME` - Provider name (default: "openai")
|
||||
- `PROVIDER_URL` - Custom provider URL (optional)
|
||||
- `OPENAI_API_KEY` - Your OpenAI API key
|
||||
- `MODEL_NAME` - Model to use (default: "gpt-4o-mini")
|
||||
- `SUPERMEMORY_BASE_URL` - Custom Supermemory base URL (optional)
|
||||
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ build-backend = "hatchling.build"
|
|||
[project]
|
||||
name = "supermemory-openai-sdk"
|
||||
version = "1.0.0"
|
||||
description = "OpenAI SDK utilities for supermemory"
|
||||
description = "Memory tools for OpenAI function calling with supermemory"
|
||||
readme = "README.md"
|
||||
license = { text = "MIT" }
|
||||
keywords = ["openai", "supermemory", "ai", "memory"]
|
||||
|
|
|
|||
|
|
@ -1,14 +1,4 @@
|
|||
"""Supermemory OpenAI SDK - Enhanced OpenAI Python SDK with infinite context."""
|
||||
|
||||
from .infinite_chat import (
|
||||
SupermemoryOpenAI,
|
||||
SupermemoryInfiniteChatConfig,
|
||||
SupermemoryInfiniteChatConfigWithProviderName,
|
||||
SupermemoryInfiniteChatConfigWithProviderUrl,
|
||||
ProviderName,
|
||||
PROVIDER_MAP,
|
||||
create_supermemory_openai,
|
||||
)
|
||||
"""Supermemory OpenAI SDK - Memory tools for OpenAI function calling."""
|
||||
|
||||
from .tools import (
|
||||
SupermemoryTools,
|
||||
|
|
@ -29,14 +19,6 @@ from .tools import (
|
|||
__version__ = "0.1.0"
|
||||
|
||||
__all__ = [
|
||||
# Infinite Chat
|
||||
"SupermemoryOpenAI",
|
||||
"SupermemoryInfiniteChatConfig",
|
||||
"SupermemoryInfiniteChatConfigWithProviderName",
|
||||
"SupermemoryInfiniteChatConfigWithProviderUrl",
|
||||
"ProviderName",
|
||||
"PROVIDER_MAP",
|
||||
"create_supermemory_openai",
|
||||
# Tools
|
||||
"SupermemoryTools",
|
||||
"SupermemoryToolsConfig",
|
||||
|
|
|
|||
|
|
@ -1,268 +0,0 @@
|
|||
"""Enhanced OpenAI client with Supermemory infinite context integration."""
|
||||
|
||||
from typing import Dict, List, Optional, Union, overload, Unpack
|
||||
from typing_extensions import Literal
|
||||
|
||||
from openai import OpenAI, AsyncStream
|
||||
from openai.types.chat import (
|
||||
ChatCompletion,
|
||||
ChatCompletionMessageParam,
|
||||
ChatCompletionToolParam,
|
||||
ChatCompletionToolChoiceOptionParam,
|
||||
CompletionCreateParams,
|
||||
)
|
||||
|
||||
|
||||
# Provider URL mapping
|
||||
PROVIDER_MAP = {
|
||||
"openai": "https://api.openai.com/v1",
|
||||
"anthropic": "https://api.anthropic.com/v1",
|
||||
"openrouter": "https://openrouter.ai/api/v1",
|
||||
"deepinfra": "https://api.deepinfra.com/v1/openai",
|
||||
"groq": "https://api.groq.com/openai/v1",
|
||||
"google": "https://generativelanguage.googleapis.com/v1beta/openai",
|
||||
"cloudflare": "https://gateway.ai.cloudflare.com/v1/*/unlimited-context/openai",
|
||||
}
|
||||
|
||||
ProviderName = Literal[
|
||||
"openai", "anthropic", "openrouter", "deepinfra", "groq", "google", "cloudflare"
|
||||
]
|
||||
|
||||
|
||||
class SupermemoryInfiniteChatConfigBase:
|
||||
"""Base configuration for Supermemory infinite chat."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
provider_api_key: str,
|
||||
headers: Optional[Dict[str, str]] = None,
|
||||
):
|
||||
self.provider_api_key = provider_api_key
|
||||
self.headers = headers or {}
|
||||
|
||||
|
||||
class SupermemoryInfiniteChatConfigWithProviderName(SupermemoryInfiniteChatConfigBase):
    """Configuration that selects an upstream provider by well-known name."""

    def __init__(
        self,
        provider_name: ProviderName,
        provider_api_key: str,
        headers: Optional[Dict[str, str]] = None,
    ):
        super().__init__(provider_api_key, headers)
        # Exactly one of provider_name / provider_url is ever set; the other
        # stays None so consumers can discriminate the two variants.
        self.provider_url: None = None
        self.provider_name = provider_name
|
||||
|
||||
class SupermemoryInfiniteChatConfigWithProviderUrl(SupermemoryInfiniteChatConfigBase):
    """Configuration that targets an arbitrary OpenAI-compatible base URL."""

    def __init__(
        self,
        provider_url: str,
        provider_api_key: str,
        headers: Optional[Dict[str, str]] = None,
    ):
        super().__init__(provider_api_key, headers)
        # Mirror of the provider-name variant: provider_name is pinned to
        # None so the two config shapes stay mutually exclusive.
        self.provider_name: None = None
        self.provider_url = provider_url
|
||||
|
||||
# Public config type: either variant is accepted anywhere a configuration is
# required; they are told apart at runtime by which of provider_name /
# provider_url is non-None.
SupermemoryInfiniteChatConfig = Union[
    SupermemoryInfiniteChatConfigWithProviderName,
    SupermemoryInfiniteChatConfigWithProviderUrl,
]
|
||||
|
||||
class SupermemoryOpenAI(OpenAI):
    """Enhanced OpenAI client with supermemory integration.

    Only chat completions are supported - all other OpenAI API endpoints are
    disabled and raise ``RuntimeError`` on access.
    """

    # Attribute names of OpenAI API surfaces that supermemory does not proxy.
    # Access to any of these raises RuntimeError (see __getattribute__).
    _DISABLED_ENDPOINTS = frozenset(
        {
            "embeddings",
            "fine_tuning",
            "images",
            "audio",
            "models",
            "moderations",
            "files",
            "batches",
            "uploads",
            "beta",
        }
    )

    def __init__(
        self,
        supermemory_api_key: str,
        config: Optional[SupermemoryInfiniteChatConfig] = None,
    ):
        """Initialize the SupermemoryOpenAI client.

        Args:
            supermemory_api_key: API key for Supermemory service
            config: Configuration for the AI provider; when omitted the
                client targets the public OpenAI endpoint with no provider
                API key.
        """
        # Resolve base URL / credentials from whichever config variant we got.
        if config is None:
            base_url = "https://api.openai.com/v1"
            api_key = None
            headers = {}
        elif getattr(config, "provider_name", None):
            base_url = PROVIDER_MAP[config.provider_name]
            api_key = config.provider_api_key
            headers = config.headers
        else:
            base_url = config.provider_url
            api_key = config.provider_api_key
            headers = config.headers

        # Supermemory authenticates every proxied request via its own header;
        # caller-supplied headers are merged on top.
        default_headers = {
            "x-supermemory-api-key": supermemory_api_key,
            **headers,
        }

        super().__init__(
            api_key=api_key,
            base_url=base_url,
            default_headers=default_headers,
        )

        self._supermemory_api_key = supermemory_api_key

    def __getattribute__(self, name: str):
        """Block access to OpenAI endpoints that supermemory does not support.

        BUG FIX: the previous implementation did
        ``setattr(self, endpoint, property(...))``. A ``property`` stored on
        an *instance* is never invoked as a descriptor, so e.g.
        ``client.embeddings`` silently returned the property object instead
        of raising. Intercepting attribute access here actually enforces the
        chat-completions-only contract.

        Raises:
            RuntimeError: when a disabled endpoint attribute is accessed.
        """
        # Class-level lookup on purpose: going through ``self`` here would
        # recurse back into this method.
        if name in SupermemoryOpenAI._DISABLED_ENDPOINTS:
            raise RuntimeError(
                "Supermemory only supports chat completions. "
                "Use chat_completion() or chat.completions.create() instead."
            )
        return super().__getattribute__(name)

    async def create_chat_completion(
        self,
        **params: Unpack[CompletionCreateParams],
    ) -> ChatCompletion:
        """Create chat completions with infinite context support.

        Args:
            **params: Parameters for chat completion

        Returns:
            ChatCompletion response
        """
        # BUG FIX: the base ``OpenAI`` client is synchronous, so
        # ``chat.completions.create`` returns a plain value; ``await``-ing it
        # raised ``TypeError`` at runtime. Returning the value from this
        # ``async`` method keeps the awaitable interface callers rely on.
        return self.chat.completions.create(**params)

    @overload
    async def chat_completion(
        self,
        messages: List[ChatCompletionMessageParam],
        *,
        model: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        tools: Optional[List[ChatCompletionToolParam]] = None,
        tool_choice: Optional[ChatCompletionToolChoiceOptionParam] = None,
        stream: Literal[False] = False,
        **kwargs: Unpack[CompletionCreateParams],
    ) -> ChatCompletion: ...

    @overload
    async def chat_completion(
        self,
        messages: List[ChatCompletionMessageParam],
        *,
        model: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        tools: Optional[List[ChatCompletionToolParam]] = None,
        tool_choice: Optional[ChatCompletionToolChoiceOptionParam] = None,
        stream: Literal[True],
        **kwargs: Unpack[CompletionCreateParams],
    ) -> AsyncStream[ChatCompletion]: ...

    async def chat_completion(
        self,
        messages: List[ChatCompletionMessageParam],
        *,
        model: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        tools: Optional[List[ChatCompletionToolParam]] = None,
        tool_choice: Optional[ChatCompletionToolChoiceOptionParam] = None,
        stream: bool = False,
        **kwargs: Unpack[CompletionCreateParams],
    ) -> Union[ChatCompletion, AsyncStream[ChatCompletion]]:
        """Create chat completions with simplified interface.

        Args:
            messages: List of chat messages
            model: Model to use (defaults to gpt-4o)
            temperature: Sampling temperature
            max_tokens: Maximum tokens to generate
            tools: Available tools for function calling
            tool_choice: Tool choice strategy
            stream: Whether to stream the response
            **kwargs: Additional parameters

        Returns:
            ChatCompletion response or stream
        """
        params: Dict[
            str,
            Union[
                str,
                List[ChatCompletionMessageParam],
                List[ChatCompletionToolParam],
                ChatCompletionToolChoiceOptionParam,
                bool,
                float,
                int,
            ],
        ] = {
            "model": model or "gpt-4o",
            "messages": messages,
            **kwargs,
        }

        # Only forward optional knobs the caller actually supplied, so
        # provider defaults apply otherwise.
        if temperature is not None:
            params["temperature"] = temperature
        if max_tokens is not None:
            params["max_tokens"] = max_tokens
        if tools is not None:
            params["tools"] = tools
        if tool_choice is not None:
            params["tool_choice"] = tool_choice
        # ``stream`` is a plain bool (never None) — the previous
        # ``if stream is not None`` guard was dead code; always forward it.
        params["stream"] = stream

        return self.chat.completions.create(**params)
|
||||
|
||||
def create_supermemory_openai(
    supermemory_api_key: str,
    config: Optional[SupermemoryInfiniteChatConfig] = None,
) -> SupermemoryOpenAI:
    """Factory helper mirroring the ``SupermemoryOpenAI`` constructor.

    Args:
        supermemory_api_key: API key for the Supermemory service.
        config: Optional provider configuration; omitted means plain OpenAI.

    Returns:
        A ready-to-use ``SupermemoryOpenAI`` client instance.
    """
    return SupermemoryOpenAI(supermemory_api_key, config)
|
||||
|
|
@ -1,387 +0,0 @@
|
|||
"""Tests for infinite_chat module."""
|
||||
|
||||
import os
|
||||
import pytest
|
||||
from typing import List
|
||||
|
||||
from openai.types.chat import ChatCompletionMessageParam
|
||||
from ..src import (
|
||||
SupermemoryOpenAI,
|
||||
SupermemoryInfiniteChatConfigWithProviderName,
|
||||
SupermemoryInfiniteChatConfigWithProviderUrl,
|
||||
ProviderName,
|
||||
)
|
||||
|
||||
|
||||
# Test configuration
# Valid provider names; must mirror ProviderName in the SDK module so the
# PROVIDER_NAME env-var validation below stays accurate.
PROVIDERS: List[ProviderName] = [
    "openai",
    "anthropic",
    "openrouter",
    "deepinfra",
    "groq",
    "google",
    "cloudflare",
]
|
||||
|
||||
|
||||
@pytest.fixture
def test_api_key() -> str:
    """Get test Supermemory API key from environment."""
    api_key = os.getenv("SUPERMEMORY_API_KEY")
    # Skip (not fail) so local runs without credentials stay green.
    if not api_key:
        pytest.skip("SUPERMEMORY_API_KEY environment variable is required for tests")
    return api_key
|
||||
|
||||
|
||||
@pytest.fixture
def test_provider_api_key() -> str:
    """Get test provider (OpenAI etc.) API key from environment."""
    api_key = os.getenv("PROVIDER_API_KEY")
    # Skip (not fail) so local runs without credentials stay green.
    if not api_key:
        pytest.skip("PROVIDER_API_KEY environment variable is required for tests")
    return api_key
|
||||
|
||||
|
||||
@pytest.fixture
def test_provider_name() -> ProviderName:
    """Get test provider name from environment (defaults to "openai")."""
    provider_name = os.getenv("PROVIDER_NAME", "openai")
    # A bad PROVIDER_NAME is a configuration error, so fail rather than skip.
    if provider_name not in PROVIDERS:
        pytest.fail(f"Invalid provider name: {provider_name}")
    return provider_name  # type: ignore
|
||||
|
||||
|
||||
@pytest.fixture
def test_provider_url() -> str:
    """Get test provider URL from environment ("" when unset)."""
    return os.getenv("PROVIDER_URL", "")
|
||||
|
||||
|
||||
@pytest.fixture
def test_model_name() -> str:
    """Get test model name from environment (defaults to gpt-4o-mini)."""
    return os.getenv("MODEL_NAME", "gpt-4o-mini")
|
||||
|
||||
|
||||
@pytest.fixture
def test_headers() -> dict:
    """Extra HTTP headers merged into client requests by the tests."""
    return {"custom-header": "test-value"}
|
||||
|
||||
|
||||
@pytest.fixture
def test_messages() -> List[List[ChatCompletionMessageParam]]:
    """Test message sets.

    Index 0: single user message; index 1: system + user pair;
    index 2: multi-turn conversation with prior assistant reply.
    """
    return [
        [{"role": "user", "content": "Hello!"}],
        [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "What is AI?"},
        ],
        [
            {"role": "user", "content": "Tell me a joke"},
            {
                "role": "assistant",
                "content": "Why don't scientists trust atoms? Because they make up everything!",
            },
            {"role": "user", "content": "Tell me another one"},
        ],
    ]
|
||||
|
||||
|
||||
class TestClientCreation:
    """Test client creation.

    These tests only construct clients; no network request is made.
    """

    def test_create_client_with_provider_name(
        self,
        test_api_key: str,
        test_provider_api_key: str,
        test_provider_name: ProviderName,
        test_headers: dict,
    ):
        """Test creating client with provider name configuration."""
        config = SupermemoryInfiniteChatConfigWithProviderName(
            provider_name=test_provider_name,
            provider_api_key=test_provider_api_key,
            headers=test_headers,
        )

        client = SupermemoryOpenAI(test_api_key, config)

        assert client is not None
        # The chat surface must remain available (only other endpoints are disabled).
        assert client.chat is not None

    def test_create_client_with_openai_provider(
        self, test_api_key: str, test_provider_api_key: str, test_headers: dict
    ):
        """Test creating client with OpenAI provider configuration."""
        config = SupermemoryInfiniteChatConfigWithProviderName(
            provider_name="openai",
            provider_api_key=test_provider_api_key,
            headers=test_headers,
        )

        client = SupermemoryOpenAI(test_api_key, config)

        assert client is not None

    def test_create_client_with_custom_provider_url(
        self, test_api_key: str, test_provider_api_key: str, test_headers: dict
    ):
        """Test creating client with custom provider URL."""
        # URL is never contacted during construction, so a fake host is fine.
        custom_url = "https://custom-provider.com/v1"
        config = SupermemoryInfiniteChatConfigWithProviderUrl(
            provider_url=custom_url,
            provider_api_key=test_provider_api_key,
            headers=test_headers,
        )

        client = SupermemoryOpenAI(test_api_key, config)

        assert client is not None
|
||||
|
||||
|
||||
class TestChatCompletions:
    """Test chat completions functionality.

    NOTE(review): these are live integration tests — each one performs a real
    completion request against the configured provider.
    """

    @pytest.mark.asyncio
    async def test_create_chat_completion_simple_message(
        self,
        test_api_key: str,
        test_provider_api_key: str,
        test_provider_name: ProviderName,
        test_model_name: str,
        test_messages: List[List[ChatCompletionMessageParam]],
    ):
        """Test creating chat completion with simple message."""
        config = SupermemoryInfiniteChatConfigWithProviderName(
            provider_name=test_provider_name,
            provider_api_key=test_provider_api_key,
            headers={},
        )

        client = SupermemoryOpenAI(test_api_key, config)

        result = await client.create_chat_completion(
            model=test_model_name,
            messages=test_messages[0],  # "Hello!"
        )

        # Only assert response shape and non-empty content; actual text is
        # model-dependent.
        assert result is not None
        assert hasattr(result, "choices")
        assert len(result.choices) > 0
        assert result.choices[0].message.content is not None

    @pytest.mark.asyncio
    async def test_chat_completion_convenience_method(
        self,
        test_api_key: str,
        test_provider_api_key: str,
        test_provider_name: ProviderName,
        test_model_name: str,
        test_messages: List[List[ChatCompletionMessageParam]],
    ):
        """Test chat completion using convenience method."""
        config = SupermemoryInfiniteChatConfigWithProviderName(
            provider_name=test_provider_name,
            provider_api_key=test_provider_api_key,
            headers={},
        )

        client = SupermemoryOpenAI(test_api_key, config)

        result = await client.chat_completion(
            messages=test_messages[1],  # System + user messages
            model=test_model_name,
            temperature=0.7,
        )

        assert result is not None
        assert hasattr(result, "choices")
        assert len(result.choices) > 0
        assert result.choices[0].message.content is not None

    @pytest.mark.asyncio
    async def test_handle_conversation_history(
        self,
        test_api_key: str,
        test_provider_api_key: str,
        test_provider_name: ProviderName,
        test_model_name: str,
        test_messages: List[List[ChatCompletionMessageParam]],
    ):
        """Test handling conversation history."""
        config = SupermemoryInfiniteChatConfigWithProviderName(
            provider_name=test_provider_name,
            provider_api_key=test_provider_api_key,
            headers={},
        )

        client = SupermemoryOpenAI(test_api_key, config)

        result = await client.chat_completion(
            messages=test_messages[2],  # Multi-turn conversation
            model=test_model_name,
        )

        assert result is not None
        assert hasattr(result, "choices")
        assert len(result.choices) > 0
        assert result.choices[0].message.content is not None

    @pytest.mark.asyncio
    async def test_custom_headers(
        self,
        test_api_key: str,
        test_provider_api_key: str,
        test_provider_name: ProviderName,
        test_model_name: str,
    ):
        """Test working with custom headers."""
        # Only checks the request still succeeds with extra headers attached;
        # the header's round-trip is not observable from the response.
        config = SupermemoryInfiniteChatConfigWithProviderName(
            provider_name=test_provider_name,
            provider_api_key=test_provider_api_key,
            headers={"x-custom-header": "test-value"},
        )

        client = SupermemoryOpenAI(test_api_key, config)

        result = await client.chat_completion(
            messages=[{"role": "user", "content": "Hello"}],
            model=test_model_name,
        )

        assert result is not None
        assert hasattr(result, "choices")
|
||||
|
||||
class TestConfigurationValidation:
    """Test configuration validation (construction only, no network)."""

    def test_handle_empty_headers_object(
        self,
        test_api_key: str,
        test_provider_api_key: str,
        test_provider_name: ProviderName,
    ):
        """Test handling empty headers object."""
        config = SupermemoryInfiniteChatConfigWithProviderName(
            provider_name=test_provider_name,
            provider_api_key=test_provider_api_key,
            headers={},
        )

        client = SupermemoryOpenAI(test_api_key, config)

        assert client is not None

    def test_handle_configuration_without_headers(
        self,
        test_api_key: str,
        test_provider_api_key: str,
        test_provider_name: ProviderName,
    ):
        """Test handling configuration without headers (defaults to None)."""
        config = SupermemoryInfiniteChatConfigWithProviderName(
            provider_name=test_provider_name,
            provider_api_key=test_provider_api_key,
        )

        client = SupermemoryOpenAI(test_api_key, config)

        assert client is not None

    def test_handle_different_api_keys(self):
        """Test handling different API keys.

        Uses throwaway keys; valid because construction performs no auth.
        """
        config = SupermemoryInfiniteChatConfigWithProviderName(
            provider_name="openai",
            provider_api_key="different-provider-key",
        )

        client = SupermemoryOpenAI("different-sm-key", config)

        assert client is not None
||||
|
||||
|
||||
class TestDisabledEndpoints:
    """Test that non-chat endpoints are disabled."""

    def test_disabled_endpoints_throw_errors(
        self, test_api_key: str, test_provider_api_key: str
    ):
        """Test that all disabled endpoints throw appropriate errors."""
        config = SupermemoryInfiniteChatConfigWithProviderName(
            provider_name="openai",
            provider_api_key=test_provider_api_key,
        )

        client = SupermemoryOpenAI(test_api_key, config)

        # Every blocked API surface must raise the same RuntimeError on access.
        blocked_attributes = (
            "embeddings",
            "fine_tuning",
            "images",
            "audio",
            "models",
            "moderations",
            "files",
            "batches",
            "uploads",
            "beta",
        )
        for attribute in blocked_attributes:
            with pytest.raises(
                RuntimeError, match="Supermemory only supports chat completions"
            ):
                getattr(client, attribute)

    def test_chat_completions_still_work(
        self, test_api_key: str, test_provider_api_key: str
    ):
        """Test that chat completions still work after disabling other endpoints."""
        config = SupermemoryInfiniteChatConfigWithProviderName(
            provider_name="openai",
            provider_api_key=test_provider_api_key,
        )

        client = SupermemoryOpenAI(test_api_key, config)

        # The chat surface and both convenience entry points remain usable.
        assert client.chat is not None
        assert client.chat.completions is not None
        assert callable(client.create_chat_completion)
        assert callable(client.chat_completion)
||||
|
|
@ -1,314 +0,0 @@
|
|||
# supermemory OpenAI SDK Utilities
|
||||
|
||||
OpenAI JS/TS SDK utilities for supermemory
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
npm install @supermemory/openai-sdk
|
||||
# or
|
||||
bun add @supermemory/openai-sdk
|
||||
# or
|
||||
pnpm add @supermemory/openai-sdk
|
||||
# or
|
||||
yarn add @supermemory/openai-sdk
|
||||
```
|
||||
|
||||
## Features
|
||||
|
||||
Choose **one** of the following approaches (they cannot be used together):
|
||||
|
||||
- **Infinite Chat Client**: Enhanced OpenAI client with unlimited context support
|
||||
- **Memory Tools**: Search, add, and fetch memories from supermemory using OpenAI function calling
|
||||
|
||||
## Infinite Chat Client
|
||||
|
||||
The infinite chat client provides an enhanced OpenAI client with supermemory's context management.
|
||||
|
||||
```typescript
|
||||
import { SupermemoryOpenAI } from '@supermemory/openai-sdk'
|
||||
|
||||
// Using a named provider
|
||||
const client = new SupermemoryOpenAI('your-supermemory-api-key', {
|
||||
providerName: 'openai',
|
||||
providerApiKey: 'your-openai-api-key',
|
||||
headers: {
|
||||
// Optional additional headers
|
||||
}
|
||||
})
|
||||
|
||||
// Using a custom provider URL
|
||||
const client = new SupermemoryOpenAI('your-supermemory-api-key', {
|
||||
providerUrl: 'https://your-custom-provider.com/v1',
|
||||
providerApiKey: 'your-provider-api-key',
|
||||
headers: {
|
||||
// Optional additional headers
|
||||
}
|
||||
})
|
||||
|
||||
const response = await client.chat.completions.create({
|
||||
model: 'gpt-4o',
|
||||
messages: [
|
||||
{ role: 'user', content: 'Hello, how are you?' }
|
||||
]
|
||||
})
|
||||
```
|
||||
|
||||
### Complete Infinite Chat Example
|
||||
|
||||
```typescript
|
||||
import { SupermemoryOpenAI } from '@supermemory/openai-sdk'
|
||||
|
||||
const supermemoryApiKey = process.env.SUPERMEMORY_API_KEY!
|
||||
const openaiApiKey = process.env.OPENAI_API_KEY!
|
||||
|
||||
// Initialize infinite chat client
|
||||
const client = new SupermemoryOpenAI(supermemoryApiKey, {
|
||||
providerName: 'openai',
|
||||
providerApiKey: openaiApiKey
|
||||
})
|
||||
|
||||
async function chat(userMessage: string) {
|
||||
const response = await client.chatCompletion([
|
||||
{
|
||||
role: 'system',
|
||||
content: 'You are a helpful assistant with unlimited context.'
|
||||
},
|
||||
{
|
||||
role: 'user',
|
||||
content: userMessage
|
||||
}
|
||||
], {
|
||||
model: 'gpt-4o'
|
||||
})
|
||||
|
||||
return response.choices[0].message.content
|
||||
}
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
```typescript
|
||||
// Option 1: Use a named provider
|
||||
interface ConfigWithProviderName {
|
||||
providerName: 'openai' | 'anthropic' | 'openrouter' | 'deepinfra' | 'groq' | 'google' | 'cloudflare'
|
||||
providerApiKey: string
|
||||
headers?: Record<string, string>
|
||||
}
|
||||
|
||||
// Option 2: Use a custom provider URL
|
||||
interface ConfigWithProviderUrl {
|
||||
providerUrl: string
|
||||
providerApiKey: string
|
||||
headers?: Record<string, string>
|
||||
}
|
||||
```
|
||||
|
||||
## Memory Tools
|
||||
|
||||
supermemory tools allow OpenAI function calling to interact with user memories for enhanced context and personalization.
|
||||
|
||||
```typescript
|
||||
import { SupermemoryTools, executeMemoryToolCalls } from '@supermemory/openai-sdk'
|
||||
import OpenAI from 'openai'
|
||||
|
||||
const openai = new OpenAI({ apiKey: 'your-openai-api-key' })
|
||||
|
||||
const response = await openai.chat.completions.create({
|
||||
model: 'gpt-4o',
|
||||
messages: [
|
||||
{ role: 'user', content: 'What do you remember about my preferences?' }
|
||||
],
|
||||
tools: new SupermemoryTools('your-supermemory-api-key', {
|
||||
// Optional: specify a base URL for self-hosted instances
|
||||
baseUrl: 'https://api.supermemory.com',
|
||||
|
||||
// Use either projectId OR containerTags, not both
|
||||
projectId: 'your-project-id',
|
||||
// OR
|
||||
containerTags: ['tag1', 'tag2']
|
||||
}).getToolDefinitions()
|
||||
})
|
||||
```
|
||||
|
||||
### Complete Memory Tools Example
|
||||
|
||||
```typescript
|
||||
import { SupermemoryTools, executeMemoryToolCalls } from '@supermemory/openai-sdk'
|
||||
import OpenAI from 'openai'
|
||||
|
||||
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY! })
|
||||
const supermemoryApiKey = process.env.SUPERMEMORY_API_KEY!
|
||||
|
||||
async function chatWithTools(userMessage: string) {
|
||||
const tools = new SupermemoryTools(supermemoryApiKey, {
|
||||
projectId: 'my-project'
|
||||
})
|
||||
|
||||
const response = await openai.chat.completions.create({
|
||||
model: 'gpt-4o',
|
||||
messages: [
|
||||
{
|
||||
role: 'system',
|
||||
content: 'You are a helpful assistant with access to user memories.'
|
||||
},
|
||||
{
|
||||
role: 'user',
|
||||
content: userMessage
|
||||
}
|
||||
],
|
||||
tools: tools.getToolDefinitions()
|
||||
})
|
||||
|
||||
// Handle tool calls if present
|
||||
if (response.choices[0].message.tool_calls) {
|
||||
const toolResults = await executeMemoryToolCalls(
|
||||
supermemoryApiKey,
|
||||
response.choices[0].message.tool_calls,
|
||||
{ projectId: 'my-project' }
|
||||
)
|
||||
|
||||
// Continue conversation with tool results...
|
||||
}
|
||||
|
||||
return response.choices[0].message.content
|
||||
}
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
```typescript
|
||||
interface SupermemoryToolsConfig {
|
||||
// Optional: Base URL for API calls (default: https://api.supermemory.com)
|
||||
baseUrl?: string
|
||||
|
||||
// Container tags for organizing memories (cannot be used with projectId)
|
||||
containerTags?: string[]
|
||||
|
||||
// Project ID for scoping memories (cannot be used with containerTags)
|
||||
projectId?: string
|
||||
}
|
||||
```
|
||||
|
||||
### Self-Hosted supermemory
|
||||
|
||||
If you're running a self-hosted supermemory instance:
|
||||
|
||||
```typescript
|
||||
const tools = new SupermemoryTools('your-api-key', {
|
||||
baseUrl: 'https://your-supermemory-instance.com',
|
||||
containerTags: ['production', 'user-memories']
|
||||
})
|
||||
```
|
||||
|
||||
### Available Tools
|
||||
|
||||
#### Search Memories
|
||||
|
||||
Search through user memories using semantic matching.
|
||||
|
||||
```typescript
|
||||
const searchResult = await tools.searchMemories({
|
||||
informationToGet: 'user preferences about coffee'
|
||||
})
|
||||
```
|
||||
|
||||
#### Add Memory
|
||||
|
||||
Add new memories to the user's memory store.
|
||||
|
||||
```typescript
|
||||
const addResult = await tools.addMemory({
|
||||
memory: 'User prefers dark roast coffee in the morning'
|
||||
})
|
||||
```
|
||||
|
||||
#### Fetch Memory
|
||||
|
||||
Retrieve a specific memory by its ID.
|
||||
|
||||
```typescript
|
||||
const fetchResult = await tools.fetchMemory({
|
||||
memoryId: 'memory-id-123'
|
||||
})
|
||||
```
|
||||
|
||||
### Using Individual Tools
|
||||
|
||||
For more flexibility, you can import and use individual tools:
|
||||
|
||||
```typescript
|
||||
import {
|
||||
createSearchMemoriesTool,
|
||||
createAddMemoryTool,
|
||||
createFetchMemoryTool
|
||||
} from '@supermemory/openai-sdk'
|
||||
|
||||
const searchTool = createSearchMemoriesTool('your-api-key', {
|
||||
projectId: 'your-project-id'
|
||||
})
|
||||
|
||||
// Use only the search tool
|
||||
const response = await openai.chat.completions.create({
|
||||
model: 'gpt-4o',
|
||||
messages: [...],
|
||||
tools: [searchTool.definition]
|
||||
})
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
|
||||
All tool executions return a result object with a `success` field:
|
||||
|
||||
```typescript
|
||||
const result = await tools.searchMemories({
|
||||
informationToGet: 'user preferences'
|
||||
})
|
||||
|
||||
if (result.success) {
|
||||
console.log('Found memories:', result.results)
|
||||
console.log('Total count:', result.count)
|
||||
} else {
|
||||
console.error('Error searching memories:', result.error)
|
||||
}
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
### Running Tests
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
bun test
|
||||
|
||||
# Run tests in watch mode
|
||||
bun test --watch
|
||||
```
|
||||
|
||||
#### Environment Variables for Tests
|
||||
|
||||
All tests require API keys to run. Copy `.env.example` to `.env` and set the required values:
|
||||
|
||||
```bash
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
**Required:**
|
||||
- `SUPERMEMORY_API_KEY`: Your Supermemory API key
|
||||
- `PROVIDER_API_KEY`: Your AI provider API key (OpenAI, etc.)
|
||||
|
||||
**Optional:**
|
||||
- `SUPERMEMORY_BASE_URL`: Custom Supermemory base URL (defaults to `https://api.supermemory.com`, matching the SDK default)
|
||||
- `PROVIDER_NAME`: Provider name (defaults to `openai`) - one of: `openai`, `anthropic`, `openrouter`, `deepinfra`, `groq`, `google`, `cloudflare`
|
||||
- `PROVIDER_URL`: Custom provider URL (use instead of `PROVIDER_NAME`)
|
||||
- `MODEL_NAME`: Model to use in tests (defaults to `gpt-4o-mini`)
|
||||
|
||||
Tests will fail if required API keys are not provided.
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
|
||||
## Support
|
||||
|
||||
Email our [24/7 Founder/CEO/Support Executive](mailto:dhravya@supermemory.com)
|
||||
|
|
@ -1,2 +0,0 @@
|
|||
export * from "./infinite-chat"
|
||||
export * from "./tools"
|
||||
|
|
@ -1,338 +0,0 @@
|
|||
import { describe, expect, it } from "vitest"
|
||||
import z from "zod"
|
||||
import type OpenAI from "openai"
|
||||
import {
|
||||
SupermemoryOpenAI,
|
||||
type SupermemoryInfiniteChatConfig,
|
||||
} from "./infinite-chat"
|
||||
|
||||
import "dotenv/config"
|
||||
|
||||
const providers = z.enum([
|
||||
"openai",
|
||||
"anthropic",
|
||||
"openrouter",
|
||||
"deepinfra",
|
||||
"groq",
|
||||
"google",
|
||||
"cloudflare",
|
||||
] satisfies SupermemoryInfiniteChatConfig["providerName"][])
|
||||
|
||||
describe("SupermemoryOpenAI", () => {
|
||||
// Required API keys - tests will fail if not provided
|
||||
const testApiKey = process.env.SUPERMEMORY_API_KEY
|
||||
const testProviderApiKey = process.env.PROVIDER_API_KEY
|
||||
|
||||
if (!testApiKey) {
|
||||
throw new Error(
|
||||
"SUPERMEMORY_API_KEY environment variable is required for tests",
|
||||
)
|
||||
}
|
||||
if (!testProviderApiKey) {
|
||||
throw new Error(
|
||||
"PROVIDER_API_KEY environment variable is required for tests",
|
||||
)
|
||||
}
|
||||
|
||||
// Optional configuration with defaults
|
||||
// Provider under test: PROVIDER_NAME is validated against the known provider
// list via `providers.parse`; defaults to "openai" when unset.
const testProviderName = providers.parse(
	process.env.PROVIDER_NAME ?? "openai",
)
// Optional explicit provider base URL (mutually exclusive with PROVIDER_NAME).
const testProviderUrl = process.env.PROVIDER_URL
const testModelName = process.env.MODEL_NAME || "gpt-4o-mini"
const testHeaders = { "custom-header": "test-value" }

// Validate provider configuration - either name OR URL, not both
if (testProviderUrl && process.env.PROVIDER_NAME) {
	throw new Error(
		"Cannot specify both PROVIDER_NAME and PROVIDER_URL - use one or the other",
	)
}

// Test prompts: a single message, a system+user pair, and a multi-turn exchange
const testMessages: OpenAI.Chat.Completions.ChatCompletionMessageParam[][] = [
	[{ role: "user", content: "Hello!" }],
	[
		{ role: "system", content: "You are a helpful assistant." },
		{ role: "user", content: "What is AI?" },
	],
	[
		{ role: "user", content: "Tell me a joke" },
		{
			role: "assistant",
			content:
				"Why don't scientists trust atoms? Because they make up everything!",
		},
		{ role: "user", content: "Tell me another one" },
	],
]

describe("client creation", () => {
	it("should create client with SupermemoryOpenAI class", () => {
		// Exercise whichever configuration branch the environment selected.
		const config: SupermemoryInfiniteChatConfig = testProviderUrl
			? {
					providerUrl: testProviderUrl,
					providerApiKey: testProviderApiKey,
					headers: testHeaders,
				}
			: {
					providerName: testProviderName,
					providerApiKey: testProviderApiKey,
					headers: testHeaders,
				}

		const client = new SupermemoryOpenAI(testApiKey, config)

		expect(client).toBeDefined()
		expect(client.chat).toBeDefined()
	})

	it("should create client with openai provider configuration", () => {
		const config: SupermemoryInfiniteChatConfig = {
			providerName: "openai",
			providerApiKey: testProviderApiKey,
			headers: testHeaders,
		}

		const client = new SupermemoryOpenAI(testApiKey, config)

		expect(client).toBeDefined()
	})

	it("should create client with custom provider URL", () => {
		const customUrl = "https://custom-provider.com/v1"
		const config: SupermemoryInfiniteChatConfig = {
			providerUrl: customUrl,
			providerApiKey: testProviderApiKey,
			headers: testHeaders,
		}

		const client = new SupermemoryOpenAI(testApiKey, config)

		expect(client).toBeDefined()
	})
})

describe("chat completions", () => {
	it("should create chat completion with simple message", async () => {
		const config: SupermemoryInfiniteChatConfig = testProviderUrl
			? {
					providerUrl: testProviderUrl,
					providerApiKey: testProviderApiKey,
					headers: {},
				}
			: {
					providerName: testProviderName,
					providerApiKey: testProviderApiKey,
					headers: {},
				}

		const client = new SupermemoryOpenAI(testApiKey, config)

		const result = await client.createChatCompletion({
			model: testModelName,
			messages: testMessages[0]!, // "Hello!"
		})

		expect(result).toBeDefined()
		// Non-streaming responses expose a `choices` array; streams do not.
		expect("choices" in result).toBe(true)
		if ("choices" in result) {
			expect(result.choices).toBeDefined()
			expect(result.choices.length).toBeGreaterThan(0)
			expect(result.choices[0]!.message.content).toBeDefined()
		}
	})

	it("should create chat completion using convenience method", async () => {
		const config: SupermemoryInfiniteChatConfig = testProviderUrl
			? {
					providerUrl: testProviderUrl,
					providerApiKey: testProviderApiKey,
					headers: {},
				}
			: {
					providerName: testProviderName,
					providerApiKey: testProviderApiKey,
					headers: {},
				}

		const client = new SupermemoryOpenAI(testApiKey, config)

		const result = await client.chatCompletion(testMessages[1]!, {
			model: testModelName,
			temperature: 0.7,
		})

		expect(result).toBeDefined()
		expect("choices" in result).toBe(true)
		if ("choices" in result) {
			expect(result.choices).toBeDefined()
			expect(result.choices.length).toBeGreaterThan(0)
			expect(result.choices[0]!.message.content).toBeDefined()
		}
	})

	it("should handle conversation history", async () => {
		const config: SupermemoryInfiniteChatConfig = testProviderUrl
			? {
					providerUrl: testProviderUrl,
					providerApiKey: testProviderApiKey,
					headers: {},
				}
			: {
					providerName: testProviderName,
					providerApiKey: testProviderApiKey,
					headers: {},
				}

		const client = new SupermemoryOpenAI(testApiKey, config)

		const result = await client.chatCompletion(testMessages[2]!, {
			model: testModelName,
		})

		expect(result).toBeDefined()
		expect("choices" in result).toBe(true)
		if ("choices" in result) {
			expect(result.choices).toBeDefined()
			expect(result.choices.length).toBeGreaterThan(0)
			expect(result.choices[0]!.message.content).toBeDefined()
		}
	})

	it("should work with custom headers", async () => {
		const config: SupermemoryInfiniteChatConfig = testProviderUrl
			? {
					providerUrl: testProviderUrl,
					providerApiKey: testProviderApiKey,
					headers: {
						"x-custom-header": "test-value",
					},
				}
			: {
					providerName: testProviderName,
					providerApiKey: testProviderApiKey,
					headers: {
						"x-custom-header": "test-value",
					},
				}

		const client = new SupermemoryOpenAI(testApiKey, config)

		const result = await client.chatCompletion(
			[{ role: "user", content: "Hello" }],
			{
				model: testModelName,
			},
		)

		expect(result).toBeDefined()
		expect("choices" in result).toBe(true)
	})
})

describe("configuration validation", () => {
	it("should handle empty headers object", () => {
		const config: SupermemoryInfiniteChatConfig = testProviderUrl
			? {
					providerUrl: testProviderUrl,
					providerApiKey: testProviderApiKey,
					headers: {},
				}
			: {
					providerName: testProviderName,
					providerApiKey: testProviderApiKey,
					headers: {},
				}

		const client = new SupermemoryOpenAI(testApiKey, config)

		expect(client).toBeDefined()
	})

	it("should handle configuration without headers", () => {
		const config: SupermemoryInfiniteChatConfig = testProviderUrl
			? {
					providerUrl: testProviderUrl,
					providerApiKey: testProviderApiKey,
				}
			: {
					providerName: testProviderName,
					providerApiKey: testProviderApiKey,
				}

		const client = new SupermemoryOpenAI(testApiKey, config)

		expect(client).toBeDefined()
	})

	it("should handle different API keys", () => {
		const config: SupermemoryInfiniteChatConfig = {
			providerName: "openai",
			providerApiKey: "different-provider-key",
		}

		const client = new SupermemoryOpenAI("different-sm-key", config)

		expect(client).toBeDefined()
	})
})

describe("disabled endpoints", () => {
	it("should throw errors for disabled OpenAI endpoints", () => {
		const config: SupermemoryInfiniteChatConfig = {
			providerName: "openai",
			providerApiKey: testProviderApiKey,
		}

		const client = new SupermemoryOpenAI(testApiKey, config)

		// Test that all disabled endpoints throw appropriate errors
		expect(() => client.embeddings).toThrow(
			"Supermemory only supports chat completions",
		)
		expect(() => client.fineTuning).toThrow(
			"Supermemory only supports chat completions",
		)
		expect(() => client.images).toThrow(
			"Supermemory only supports chat completions",
		)
		expect(() => client.audio).toThrow(
			"Supermemory only supports chat completions",
		)
		expect(() => client.models).toThrow(
			"Supermemory only supports chat completions",
		)
		expect(() => client.moderations).toThrow(
			"Supermemory only supports chat completions",
		)
		expect(() => client.files).toThrow(
			"Supermemory only supports chat completions",
		)
		expect(() => client.batches).toThrow(
			"Supermemory only supports chat completions",
		)
		expect(() => client.uploads).toThrow(
			"Supermemory only supports chat completions",
		)
		expect(() => client.beta).toThrow(
			"Supermemory only supports chat completions",
		)
	})

	it("should still allow chat completions to work", () => {
		const config: SupermemoryInfiniteChatConfig = {
			providerName: "openai",
			providerApiKey: testProviderApiKey,
		}

		const client = new SupermemoryOpenAI(testApiKey, config)

		// Chat completions should still be accessible
		expect(client.chat).toBeDefined()
		expect(client.chat.completions).toBeDefined()
		expect(client.createChatCompletion).toBeDefined()
		expect(client.chatCompletion).toBeDefined()
	})
})
// Closes the enclosing top-level describe, which opens above this excerpt.
})
|
||||
|
|
@ -1,133 +0,0 @@
|
|||
import OpenAI from "openai"
|
||||
|
||||
/** Fields shared by both infinite-chat configuration variants. */
interface SupermemoryInfiniteChatConfigBase {
	// API key forwarded to the underlying model provider.
	providerApiKey: string
	// Extra headers merged into every request's default headers.
	headers?: Record<string, string>
}

/** Select the provider by name; `providerUrl` is then forbidden at the type level. */
interface SupermemoryInfiniteChatConfigWithProviderName
	extends SupermemoryInfiniteChatConfigBase {
	providerName: keyof typeof providerMap
	providerUrl?: never
}

/** Select the provider by explicit base URL; `providerName` is then forbidden. */
interface SupermemoryInfiniteChatConfigWithProviderUrl
	extends SupermemoryInfiniteChatConfigBase {
	providerUrl: string
	providerName?: never
}

// Union makes the two selection modes mutually exclusive at compile time.
export type SupermemoryInfiniteChatConfig =
	| SupermemoryInfiniteChatConfigWithProviderName
	| SupermemoryInfiniteChatConfigWithProviderUrl

// Alias to make the constructor's first parameter self-describing.
type SupermemoryApiKey = string

// Base URLs for the supported OpenAI-compatible providers.
const providerMap = {
	openai: "https://api.openai.com/v1",
	anthropic: "https://api.anthropic.com/v1",
	openrouter: "https://openrouter.ai/api/v1",
	deepinfra: "https://api.deepinfra.com/v1/openai",
	groq: "https://api.groq.com/openai/v1",
	google: "https://generativelanguage.googleapis.com/v1beta/openai",
	cloudflare: "https://gateway.ai.cloudflare.com/v1/*/unlimited-context/openai",
} as const
|
||||
|
||||
/**
|
||||
* Enhanced OpenAI client with supermemory integration
|
||||
* Only chat completions are supported - all other OpenAI API endpoints are disabled
|
||||
*/
|
||||
export class SupermemoryOpenAI extends OpenAI {
|
||||
private supermemoryApiKey: string
|
||||
|
||||
constructor(
|
||||
supermemoryApiKey: SupermemoryApiKey,
|
||||
config?: SupermemoryInfiniteChatConfig,
|
||||
) {
|
||||
const baseURL = config?.providerName
|
||||
? providerMap[config.providerName]
|
||||
: (config?.providerUrl ?? "https://api.openai.com/v1")
|
||||
|
||||
super({
|
||||
apiKey: config?.providerApiKey,
|
||||
baseURL,
|
||||
defaultHeaders: {
|
||||
"x-supermemory-api-key": supermemoryApiKey,
|
||||
...config?.headers,
|
||||
},
|
||||
})
|
||||
|
||||
this.supermemoryApiKey = supermemoryApiKey
|
||||
|
||||
// Disable all non-chat completion endpoints
|
||||
this.disableUnsupportedEndpoints()
|
||||
}
|
||||
|
||||
/**
|
||||
* Disable all OpenAI endpoints except chat completions
|
||||
*/
|
||||
private disableUnsupportedEndpoints() {
|
||||
const unsupportedError = (): never => {
|
||||
throw new Error(
|
||||
"Supermemory only supports chat completions. Use chatCompletion() or chat.completions.create() instead.",
|
||||
)
|
||||
}
|
||||
|
||||
// Override all other OpenAI API endpoints using Object.defineProperty
|
||||
const endpoints = [
|
||||
"embeddings",
|
||||
"fineTuning",
|
||||
"images",
|
||||
"audio",
|
||||
"models",
|
||||
"moderations",
|
||||
"files",
|
||||
"batches",
|
||||
"uploads",
|
||||
"beta",
|
||||
]
|
||||
|
||||
for (const endpoint of endpoints) {
|
||||
Object.defineProperty(this, endpoint, {
|
||||
get: unsupportedError,
|
||||
configurable: true,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create chat completions with infinite context support
|
||||
*/
|
||||
async createChatCompletion<
|
||||
T extends OpenAI.Chat.Completions.ChatCompletionCreateParams,
|
||||
>(params: T) {
|
||||
return this.chat.completions.create(params)
|
||||
}
|
||||
|
||||
/**
|
||||
* Create chat completions with simplified interface
|
||||
*/
|
||||
async chatCompletion(
|
||||
messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[],
|
||||
options?: {
|
||||
model?: string
|
||||
temperature?: number
|
||||
maxTokens?: number
|
||||
tools?: OpenAI.Chat.Completions.ChatCompletionTool[]
|
||||
toolChoice?: OpenAI.Chat.Completions.ChatCompletionToolChoiceOption
|
||||
stream?: boolean
|
||||
},
|
||||
) {
|
||||
const params = {
|
||||
model: options?.model ?? "gpt-4o",
|
||||
messages,
|
||||
temperature: options?.temperature,
|
||||
max_tokens: options?.maxTokens,
|
||||
tools: options?.tools,
|
||||
tool_choice: options?.toolChoice,
|
||||
stream: options?.stream,
|
||||
} satisfies OpenAI.Chat.Completions.ChatCompletionCreateParams
|
||||
|
||||
return this.chat.completions.create(params)
|
||||
}
|
||||
}
|
||||
|
|
@ -1,297 +0,0 @@
|
|||
import { describe, expect, it } from "vitest"
|
||||
import {
|
||||
SupermemoryTools,
|
||||
createSupermemoryTools,
|
||||
getMemoryToolDefinitions,
|
||||
executeMemoryToolCalls,
|
||||
createSearchMemoriesTool,
|
||||
createAddMemoryTool,
|
||||
type SupermemoryToolsConfig,
|
||||
} from "./tools"
|
||||
import { SupermemoryOpenAI } from "./infinite-chat"
|
||||
|
||||
import "dotenv/config"
|
||||
|
||||
describe("SupermemoryTools", () => {
	// Required API keys - tests will fail if not provided
	const testApiKey = process.env.SUPERMEMORY_API_KEY
	const testProviderApiKey = process.env.PROVIDER_API_KEY

	if (!testApiKey) {
		throw new Error(
			"SUPERMEMORY_API_KEY environment variable is required for tests",
		)
	}
	if (!testProviderApiKey) {
		throw new Error(
			"PROVIDER_API_KEY environment variable is required for tests",
		)
	}

	// Optional configuration with defaults
	const testBaseUrl = process.env.SUPERMEMORY_BASE_URL ?? undefined
	const testModelName = process.env.MODEL_NAME || "gpt-4o-mini"

	describe("tool initialization", () => {
		it("should create tools with default configuration", () => {
			const config: SupermemoryToolsConfig = {}
			const tools = new SupermemoryTools(testApiKey, config)

			expect(tools).toBeDefined()
			expect(tools.getToolDefinitions()).toBeDefined()
			// NOTE(review): getToolDefinitions() in tools.ts returns only the
			// search and add schemas — confirm the expected count of 3.
			expect(tools.getToolDefinitions().length).toBe(3)
		})

		it("should create tools with createSupermemoryTools helper", () => {
			const tools = createSupermemoryTools(testApiKey, {
				projectId: "test-project",
			})

			expect(tools).toBeDefined()
			expect(tools.getToolDefinitions()).toBeDefined()
		})

		it("should create tools with custom baseUrl", () => {
			const config: SupermemoryToolsConfig = {
				baseUrl: testBaseUrl,
			}
			const tools = new SupermemoryTools(testApiKey, config)

			expect(tools).toBeDefined()
			expect(tools.getToolDefinitions().length).toBe(3)
		})

		it("should create tools with projectId configuration", () => {
			const config: SupermemoryToolsConfig = {
				projectId: "test-project-123",
			}
			const tools = new SupermemoryTools(testApiKey, config)

			expect(tools).toBeDefined()
			expect(tools.getToolDefinitions().length).toBe(3)
		})

		it("should create tools with custom container tags", () => {
			const config: SupermemoryToolsConfig = {
				containerTags: ["custom-tag-1", "custom-tag-2"],
			}
			const tools = new SupermemoryTools(testApiKey, config)

			expect(tools).toBeDefined()
			expect(tools.getToolDefinitions().length).toBe(3)
		})
	})

	describe("tool definitions", () => {
		it("should return proper OpenAI function definitions", () => {
			const definitions = getMemoryToolDefinitions()

			expect(definitions).toBeDefined()
			expect(definitions.length).toBe(3)

			// Check searchMemories
			const searchTool = definitions.find(
				(d) => d.function.name === "searchMemories",
			)
			expect(searchTool).toBeDefined()
			expect(searchTool!.type).toBe("function")
			expect(searchTool!.function.parameters?.required).toContain(
				"informationToGet",
			)

			// Check addMemory
			const addTool = definitions.find((d) => d.function.name === "addMemory")
			expect(addTool).toBeDefined()
			expect(addTool!.type).toBe("function")
			expect(addTool!.function.parameters?.required).toContain("memory")
		})

		it("should have consistent tool definitions from class and helper", () => {
			const tools = new SupermemoryTools(testApiKey)
			const classDefinitions = tools.getToolDefinitions()
			const helperDefinitions = getMemoryToolDefinitions()

			expect(classDefinitions).toEqual(helperDefinitions)
		})
	})

	describe("memory operations", () => {
		it("should search memories", async () => {
			const tools = new SupermemoryTools(testApiKey, {
				projectId: "test-search",
				baseUrl: testBaseUrl,
			})

			const result = await tools.searchMemories({
				informationToGet: "test preferences",
				limit: 5,
			})

			expect(result).toBeDefined()
			expect(result.success).toBeDefined()
			expect(typeof result.success).toBe("boolean")

			// Either branch of the result union is acceptable here; the API
			// call may legitimately fail in CI without credentials/data.
			if (result.success) {
				expect(result.results).toBeDefined()
				expect(result.count).toBeDefined()
				expect(typeof result.count).toBe("number")
			} else {
				expect(result.error).toBeDefined()
			}
		})

		it("should add memory", async () => {
			const tools = new SupermemoryTools(testApiKey, {
				containerTags: ["test-add-memory"],
				baseUrl: testBaseUrl,
			})

			const result = await tools.addMemory({
				memory: "User prefers dark roast coffee in the morning - test memory",
			})

			expect(result).toBeDefined()
			expect(result.success).toBeDefined()
			expect(typeof result.success).toBe("boolean")

			if (result.success) {
				expect(result.memory).toBeDefined()
			} else {
				expect(result.error).toBeDefined()
			}
		})
	})

	describe("individual tool creators", () => {
		it("should create individual search tool", () => {
			const searchTool = createSearchMemoriesTool(testApiKey, {
				projectId: "test-individual",
			})

			expect(searchTool).toBeDefined()
			expect(searchTool.definition).toBeDefined()
			expect(searchTool.execute).toBeDefined()
			expect(searchTool.definition.function.name).toBe("searchMemories")
		})

		it("should create individual add tool", () => {
			const addTool = createAddMemoryTool(testApiKey, {
				projectId: "test-individual",
			})

			expect(addTool).toBeDefined()
			expect(addTool.definition).toBeDefined()
			expect(addTool.execute).toBeDefined()
			expect(addTool.definition.function.name).toBe("addMemory")
		})
	})

	describe("OpenAI integration", () => {
		it("should work with SupermemoryOpenAI for function calling", async () => {
			const client = new SupermemoryOpenAI(testApiKey, {
				providerName: "openai",
				providerApiKey: testProviderApiKey,
			})

			const tools = new SupermemoryTools(testApiKey, {
				projectId: "test-openai-integration",
				baseUrl: testBaseUrl,
			})

			const response = await client.chatCompletion(
				[
					{
						role: "system",
						content:
							"You are a helpful assistant with access to user memories. When the user asks you to remember something, use the addMemory tool.",
					},
					{
						role: "user",
						content: "Please remember that I prefer tea over coffee",
					},
				],
				{
					model: testModelName,
					tools: tools.getToolDefinitions(),
				},
			)

			expect(response).toBeDefined()
			expect("choices" in response).toBe(true)

			if ("choices" in response) {
				const choice = response.choices[0]!
				expect(choice.message).toBeDefined()

				// If the model decided to use function calling, test the execution
				if (choice.message.tool_calls && choice.message.tool_calls.length > 0) {
					const toolResults = await executeMemoryToolCalls(
						testApiKey,
						choice.message.tool_calls,
						{
							projectId: "test-openai-integration",
							baseUrl: testBaseUrl,
						},
					)

					expect(toolResults).toBeDefined()
					expect(toolResults.length).toBe(choice.message.tool_calls.length)

					for (const result of toolResults) {
						expect(result.role).toBe("tool")
						expect(result.content).toBeDefined()
						expect(result.tool_call_id).toBeDefined()
					}
				}
			}
		})

		it("should handle multiple tool calls", async () => {
			const tools = new SupermemoryTools(testApiKey, {
				containerTags: ["test-multi-tools"],
				baseUrl: testBaseUrl,
			})

			// Simulate tool calls (normally these would come from OpenAI)
			const mockToolCalls = [
				{
					id: "call_1",
					type: "function" as const,
					function: {
						name: "searchMemories",
						arguments: JSON.stringify({ informationToGet: "preferences" }),
					},
				},
				{
					id: "call_2",
					type: "function" as const,
					function: {
						name: "addMemory",
						arguments: JSON.stringify({
							memory: "Test memory for multiple calls",
						}),
					},
				},
			]

			const results = await executeMemoryToolCalls(testApiKey, mockToolCalls, {
				containerTags: ["test-multi-tools"],
				baseUrl: testBaseUrl,
			})

			expect(results).toBeDefined()
			expect(results.length).toBe(2)

			// Result order must match the tool-call order.
			expect(results[0]!.tool_call_id).toBe("call_1")
			expect(results[1]!.tool_call_id).toBe("call_2")

			for (const result of results) {
				expect(result.role).toBe("tool")
				expect(result.content).toBeDefined()

				const content = JSON.parse(result.content as string)
				expect(content.success).toBeDefined()
			}
		})
	})
})
|
||||
|
|
@ -1,299 +0,0 @@
|
|||
import type OpenAI from "openai"
|
||||
import Supermemory from "supermemory"
|
||||
|
||||
/**
 * Supermemory configuration
 * Only one of `projectId` or `containerTags` can be provided.
 */
export interface SupermemoryToolsConfig {
	// Override for the supermemory API base URL.
	baseUrl?: string
	// Explicit container tags used to scope memories.
	containerTags?: string[]
	// Convenience alternative to containerTags; mapped to `sm_project_<id>`.
	projectId?: string
}

/**
 * Result types for memory operations
 * Each carries a `success` flag plus either a payload or an error message.
 */
export interface MemorySearchResult {
	success: boolean
	results?: Awaited<ReturnType<Supermemory["search"]["execute"]>>["results"]
	count?: number
	error?: string
}

export interface MemoryAddResult {
	success: boolean
	memory?: Awaited<ReturnType<Supermemory["memories"]["add"]>>
	error?: string
}

export interface MemoryFetchResult {
	success: boolean
	memory?: Awaited<ReturnType<Supermemory["memories"]["get"]>>
	error?: string
}
|
||||
|
||||
/**
 * Function schemas for OpenAI function calling
 * These are plain JSON-schema descriptions advertised to the model.
 */
export const memoryToolSchemas = {
	searchMemories: {
		name: "searchMemories",
		description:
			"Search (recall) memories/details/information about the user or other facts or entities. Run when explicitly asked or when context about user's past choices would be helpful.",
		parameters: {
			type: "object",
			properties: {
				informationToGet: {
					type: "string",
					description: "Terms to search for in the user's memories",
				},
				includeFullDocs: {
					type: "boolean",
					description:
						"Whether to include the full document content in the response. Defaults to true for better AI context.",
					default: true,
				},
				limit: {
					type: "number",
					description: "Maximum number of results to return",
					default: 10,
				},
			},
			required: ["informationToGet"],
		},
	} satisfies OpenAI.FunctionDefinition,

	addMemory: {
		name: "addMemory",
		description:
			"Add (remember) memories/details/information about the user or other facts or entities. Run when explicitly asked or when the user mentions any information generalizable beyond the context of the current conversation.",
		parameters: {
			type: "object",
			properties: {
				memory: {
					type: "string",
					description:
						"The text content of the memory to add. This should be a single sentence or a short paragraph.",
				},
			},
			required: ["memory"],
		},
	} satisfies OpenAI.FunctionDefinition,
} as const
|
||||
|
||||
/**
|
||||
* Create memory tool handlers for OpenAI function calling
|
||||
*/
|
||||
export class SupermemoryTools {
|
||||
private client: Supermemory
|
||||
private containerTags: string[]
|
||||
|
||||
constructor(apiKey: string, config?: SupermemoryToolsConfig) {
|
||||
this.client = new Supermemory({
|
||||
apiKey,
|
||||
...(config?.baseUrl && { baseURL: config.baseUrl }),
|
||||
})
|
||||
|
||||
this.containerTags = config?.projectId
|
||||
? [`sm_project_${config.projectId}`]
|
||||
: (config?.containerTags ?? ["sm_project_default"])
|
||||
}
|
||||
|
||||
/**
|
||||
* Get OpenAI function definitions for all memory tools
|
||||
*/
|
||||
getToolDefinitions(): OpenAI.Chat.Completions.ChatCompletionTool[] {
|
||||
return [
|
||||
{ type: "function", function: memoryToolSchemas.searchMemories },
|
||||
{ type: "function", function: memoryToolSchemas.addMemory },
|
||||
]
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a tool call based on the function name and arguments
|
||||
*/
|
||||
async executeToolCall(
|
||||
toolCall: OpenAI.Chat.Completions.ChatCompletionMessageToolCall,
|
||||
): Promise<string> {
|
||||
const functionName = toolCall.function.name
|
||||
const args = JSON.parse(toolCall.function.arguments)
|
||||
|
||||
switch (functionName) {
|
||||
case "searchMemories":
|
||||
return JSON.stringify(await this.searchMemories(args))
|
||||
case "addMemory":
|
||||
return JSON.stringify(await this.addMemory(args))
|
||||
default:
|
||||
return JSON.stringify({
|
||||
success: false,
|
||||
error: `Unknown function: ${functionName}`,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Search memories
|
||||
*/
|
||||
async searchMemories({
|
||||
informationToGet,
|
||||
includeFullDocs = true,
|
||||
limit = 10,
|
||||
}: {
|
||||
informationToGet: string
|
||||
includeFullDocs?: boolean
|
||||
limit?: number
|
||||
}): Promise<MemorySearchResult> {
|
||||
try {
|
||||
const response = await this.client.search.execute({
|
||||
q: informationToGet,
|
||||
containerTags: this.containerTags,
|
||||
limit,
|
||||
chunkThreshold: 0.6,
|
||||
includeFullDocs,
|
||||
})
|
||||
|
||||
return {
|
||||
success: true,
|
||||
results: response.results,
|
||||
count: response.results?.length || 0,
|
||||
}
|
||||
} catch (error) {
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : "Unknown error",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a memory
|
||||
*/
|
||||
async addMemory({ memory }: { memory: string }): Promise<MemoryAddResult> {
|
||||
try {
|
||||
const metadata: Record<string, string | number | boolean> = {}
|
||||
|
||||
const response = await this.client.memories.add({
|
||||
content: memory,
|
||||
containerTags: this.containerTags,
|
||||
...(Object.keys(metadata).length > 0 && { metadata }),
|
||||
})
|
||||
|
||||
return {
|
||||
success: true,
|
||||
memory: response,
|
||||
}
|
||||
} catch (error) {
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : "Unknown error",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch a specific memory by ID
|
||||
*/
|
||||
async fetchMemory({
|
||||
memoryId,
|
||||
}: {
|
||||
memoryId: string
|
||||
}): Promise<MemoryFetchResult> {
|
||||
try {
|
||||
const response = await this.client.memories.get(memoryId)
|
||||
|
||||
return {
|
||||
success: true,
|
||||
memory: response,
|
||||
}
|
||||
} catch (error) {
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : "Unknown error",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Helper function to create SupermemoryTools instance.
 * Thin factory wrapper around the class constructor.
 */
export function createSupermemoryTools(
	apiKey: string,
	config?: SupermemoryToolsConfig,
): SupermemoryTools {
	return new SupermemoryTools(apiKey, config)
}
|
||||
|
||||
/**
|
||||
* Get OpenAI function definitions for memory tools
|
||||
*/
|
||||
export function getMemoryToolDefinitions(): OpenAI.Chat.Completions.ChatCompletionTool[] {
|
||||
return [
|
||||
{ type: "function", function: memoryToolSchemas.searchMemories },
|
||||
{ type: "function", function: memoryToolSchemas.addMemory },
|
||||
]
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute tool calls from OpenAI function calling
|
||||
*/
|
||||
export async function executeMemoryToolCalls(
|
||||
apiKey: string,
|
||||
toolCalls: OpenAI.Chat.Completions.ChatCompletionMessageToolCall[],
|
||||
config?: SupermemoryToolsConfig,
|
||||
): Promise<OpenAI.Chat.Completions.ChatCompletionToolMessageParam[]> {
|
||||
const tools = new SupermemoryTools(apiKey, config)
|
||||
|
||||
const results = await Promise.all(
|
||||
toolCalls.map(async (toolCall) => {
|
||||
const result = await tools.executeToolCall(toolCall)
|
||||
return {
|
||||
tool_call_id: toolCall.id,
|
||||
role: "tool" as const,
|
||||
content: result,
|
||||
}
|
||||
}),
|
||||
)
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
/**
|
||||
* Individual tool creators for more granular control
|
||||
*/
|
||||
export function createSearchMemoriesTool(
|
||||
apiKey: string,
|
||||
config?: SupermemoryToolsConfig,
|
||||
) {
|
||||
return {
|
||||
definition: {
|
||||
type: "function" as const,
|
||||
function: memoryToolSchemas.searchMemories,
|
||||
},
|
||||
execute: (args: {
|
||||
informationToGet: string
|
||||
includeFullDocs?: boolean
|
||||
limit?: number
|
||||
}) => {
|
||||
const tools = new SupermemoryTools(apiKey, config)
|
||||
return tools.searchMemories(args)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
export function createAddMemoryTool(
|
||||
apiKey: string,
|
||||
config?: SupermemoryToolsConfig,
|
||||
) {
|
||||
return {
|
||||
definition: {
|
||||
type: "function" as const,
|
||||
function: memoryToolSchemas.addMemory,
|
||||
},
|
||||
execute: (args: { memory: string }) => {
|
||||
const tools = new SupermemoryTools(apiKey, config)
|
||||
return tools.addMemory(args)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@ -2,4 +2,4 @@ src/
|
|||
.turbo/
|
||||
.env
|
||||
tsdown.config.ts
|
||||
tsconfig.json
|
||||
tsconfig.json
|
||||
155
packages/tools/README.md
Normal file
155
packages/tools/README.md
Normal file
|
|
@ -0,0 +1,155 @@
|
|||
# @supermemory/tools
|
||||
|
||||
Memory tools for AI SDK and OpenAI function calling with supermemory.
|
||||
|
||||
This package provides supermemory tools for both AI SDK and OpenAI function calling through dedicated submodule exports, each with function-based architectures optimized for their respective use cases.
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
npm install @supermemory/tools
# or
bun add @supermemory/tools
# or
pnpm add @supermemory/tools
# or
yarn add @supermemory/tools
```
|
||||
|
||||
## Usage
|
||||
|
||||
The package provides two submodule imports:
|
||||
- `@supermemory/tools/ai-sdk` - For use with the AI SDK framework
|
||||
- `@supermemory/tools/openai` - For use with OpenAI's function calling
|
||||
|
||||
### AI SDK Usage
|
||||
|
||||
```typescript
|
||||
import { supermemoryTools, searchMemoriesTool, addMemoryTool } from "@supermemory/tools/ai-sdk"
|
||||
import { createOpenAI } from "@ai-sdk/openai"
|
||||
import { generateText } from "ai"
|
||||
|
||||
const openai = createOpenAI({
|
||||
apiKey: process.env.OPENAI_API_KEY!,
|
||||
})
|
||||
|
||||
// Create all tools
|
||||
const tools = supermemoryTools(process.env.SUPERMEMORY_API_KEY!, {
|
||||
projectId: "your-project-id",
|
||||
})
|
||||
|
||||
// Use with AI SDK
|
||||
const result = await generateText({
|
||||
model: openai("gpt-4"),
|
||||
messages: [
|
||||
{
|
||||
role: "user",
|
||||
content: "What do you remember about my preferences?",
|
||||
},
|
||||
],
|
||||
tools,
|
||||
})
|
||||
|
||||
// Or create individual tools
|
||||
const searchTool = searchMemoriesTool(process.env.SUPERMEMORY_API_KEY!, {
|
||||
projectId: "your-project-id",
|
||||
})
|
||||
|
||||
const addTool = addMemoryTool(process.env.SUPERMEMORY_API_KEY!, {
|
||||
projectId: "your-project-id",
|
||||
})
|
||||
```
|
||||
|
||||
### OpenAI Function Calling Usage
|
||||
|
||||
```typescript
|
||||
import { supermemoryTools, getToolDefinitions, createToolCallExecutor } from "@supermemory/tools/openai"
|
||||
import OpenAI from "openai"
|
||||
|
||||
const client = new OpenAI({
|
||||
apiKey: process.env.OPENAI_API_KEY!,
|
||||
})
|
||||
|
||||
// Get tool definitions for OpenAI
|
||||
const toolDefinitions = getToolDefinitions()
|
||||
|
||||
// Create tool executor
|
||||
const executeToolCall = createToolCallExecutor(process.env.SUPERMEMORY_API_KEY!, {
|
||||
projectId: "your-project-id",
|
||||
})
|
||||
|
||||
// Use with OpenAI Chat Completions
|
||||
const completion = await client.chat.completions.create({
|
||||
model: "gpt-4",
|
||||
messages: [
|
||||
{
|
||||
role: "user",
|
||||
content: "What do you remember about my preferences?",
|
||||
},
|
||||
],
|
||||
tools: toolDefinitions,
|
||||
})
|
||||
|
||||
// Execute tool calls if any
|
||||
if (completion.choices[0]?.message.tool_calls) {
|
||||
for (const toolCall of completion.choices[0].message.tool_calls) {
|
||||
const result = await executeToolCall(toolCall)
|
||||
console.log(result)
|
||||
}
|
||||
}
|
||||
|
||||
// Or create individual function-based tools
|
||||
const tools = supermemoryTools(process.env.SUPERMEMORY_API_KEY!, {
|
||||
projectId: "your-project-id",
|
||||
})
|
||||
|
||||
const searchResult = await tools.searchMemories({
|
||||
informationToGet: "user preferences",
|
||||
limit: 10,
|
||||
})
|
||||
|
||||
const addResult = await tools.addMemory({
|
||||
memory: "User prefers dark roast coffee",
|
||||
})
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
Both modules accept the same configuration interface:
|
||||
|
||||
```typescript
|
||||
interface SupermemoryToolsConfig {
|
||||
baseUrl?: string
|
||||
containerTags?: string[]
|
||||
projectId?: string
|
||||
}
|
||||
```
|
||||
|
||||
- **baseUrl**: Custom base URL for the supermemory API
|
||||
- **containerTags**: Array of custom container tags (mutually exclusive with projectId)
|
||||
- **projectId**: Project ID which gets converted to container tag format (mutually exclusive with containerTags)
|
||||
|
||||
## Available Tools
|
||||
|
||||
### Search Memories
|
||||
Searches through stored memories based on a query string.
|
||||
|
||||
**Parameters:**
|
||||
- `informationToGet` (string): Terms to search for
|
||||
- `includeFullDocs` (boolean, optional): Whether to include full document content (default: true)
|
||||
- `limit` (number, optional): Maximum number of results (default: 10)
|
||||
|
||||
### Add Memory
|
||||
Adds a new memory to the system.
|
||||
|
||||
**Parameters:**
|
||||
- `memory` (string): The content to remember
|
||||
|
||||
|
||||
|
||||
## Environment Variables
|
||||
|
||||
```env
|
||||
SUPERMEMORY_API_KEY=your_supermemory_api_key
|
||||
SUPERMEMORY_BASE_URL=https://your-custom-url # optional
|
||||
```
|
||||
|
|
@ -1,19 +1,22 @@
|
|||
{
|
||||
"name": "@supermemory/openai-sdk",
|
||||
"version": "1.0.0",
|
||||
"name": "@supermemory/tools",
|
||||
"type": "module",
|
||||
"description": "OpenAI SDK utilities for supermemory",
|
||||
"version": "1.0.4",
|
||||
"description": "Memory tools for AI SDK and OpenAI function calling with supermemory",
|
||||
"scripts": {
|
||||
"build": "tsdown",
|
||||
"dev": "tsdown --watch",
|
||||
"check-types": "tsc --noEmit",
|
||||
"test": "vitest",
|
||||
"test:watch": "vitest --watch"
|
||||
"test": "vitest --testTimeout 100000",
|
||||
"test:watch": "vitest --watch --testTimeout 100000"
|
||||
},
|
||||
"dependencies": {
|
||||
"@ai-sdk/openai": "^2.0.23",
|
||||
"@ai-sdk/provider": "^2.0.0",
|
||||
"ai": "^5.0.29",
|
||||
"openai": "^4.104.0",
|
||||
"supermemory": "^3.0.0-alpha.26",
|
||||
"zod": "^4.1.4"
|
||||
"zod": "^4.1.5"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@total-typescript/tsconfig": "^1.0.4",
|
||||
|
|
@ -28,19 +31,23 @@
|
|||
"types": "./dist/index.d.ts",
|
||||
"exports": {
|
||||
".": "./dist/index.js",
|
||||
"./ai-sdk": "./dist/ai-sdk.js",
|
||||
"./openai": "./dist/openai.js",
|
||||
"./package.json": "./package.json"
|
||||
},
|
||||
"repository": {
|
||||
"url": "https://github.com/supermemoryai/supermemory",
|
||||
"directory": "packages/openai-sdk-ts"
|
||||
"directory": "packages/tools"
|
||||
},
|
||||
"keywords": [
|
||||
"ai",
|
||||
"sdk",
|
||||
"openai",
|
||||
"typescript",
|
||||
"supermemory",
|
||||
"ai",
|
||||
"memory",
|
||||
"context"
|
||||
"context",
|
||||
"tools"
|
||||
],
|
||||
"license": "MIT"
|
||||
}
|
||||
121
packages/tools/src/ai-sdk.ts
Normal file
121
packages/tools/src/ai-sdk.ts
Normal file
|
|
@ -0,0 +1,121 @@
|
|||
import Supermemory from "supermemory"
|
||||
import { tool } from "ai"
|
||||
import { z } from "zod"
|
||||
import {
|
||||
DEFAULT_VALUES,
|
||||
PARAMETER_DESCRIPTIONS,
|
||||
TOOL_DESCRIPTIONS,
|
||||
getContainerTags,
|
||||
} from "./shared"
|
||||
import type { SupermemoryToolsConfig } from "./types"
|
||||
|
||||
// Export individual tool creators
|
||||
export const searchMemoriesTool = (
|
||||
apiKey: string,
|
||||
config?: SupermemoryToolsConfig,
|
||||
) => {
|
||||
const client = new Supermemory({
|
||||
apiKey,
|
||||
...(config?.baseUrl ? { baseURL: config.baseUrl } : {}),
|
||||
})
|
||||
|
||||
const containerTags = getContainerTags(config)
|
||||
|
||||
return tool({
|
||||
description: TOOL_DESCRIPTIONS.searchMemories,
|
||||
inputSchema: z.object({
|
||||
informationToGet: z
|
||||
.string()
|
||||
.describe(PARAMETER_DESCRIPTIONS.informationToGet),
|
||||
includeFullDocs: z
|
||||
.boolean()
|
||||
.optional()
|
||||
.default(DEFAULT_VALUES.includeFullDocs)
|
||||
.describe(PARAMETER_DESCRIPTIONS.includeFullDocs),
|
||||
limit: z
|
||||
.number()
|
||||
.optional()
|
||||
.default(DEFAULT_VALUES.limit)
|
||||
.describe(PARAMETER_DESCRIPTIONS.limit),
|
||||
}),
|
||||
execute: async ({
|
||||
informationToGet,
|
||||
includeFullDocs = DEFAULT_VALUES.includeFullDocs,
|
||||
limit = DEFAULT_VALUES.limit,
|
||||
}) => {
|
||||
try {
|
||||
const response = await client.search.execute({
|
||||
q: informationToGet,
|
||||
containerTags,
|
||||
limit,
|
||||
chunkThreshold: DEFAULT_VALUES.chunkThreshold,
|
||||
includeFullDocs,
|
||||
})
|
||||
|
||||
return {
|
||||
success: true,
|
||||
results: response.results,
|
||||
count: response.results?.length || 0,
|
||||
}
|
||||
} catch (error) {
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : "Unknown error",
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
export const addMemoryTool = (
|
||||
apiKey: string,
|
||||
config?: SupermemoryToolsConfig,
|
||||
) => {
|
||||
const client = new Supermemory({
|
||||
apiKey,
|
||||
...(config?.baseUrl ? { baseURL: config.baseUrl } : {}),
|
||||
})
|
||||
|
||||
const containerTags = getContainerTags(config)
|
||||
|
||||
return tool({
|
||||
description: TOOL_DESCRIPTIONS.addMemory,
|
||||
inputSchema: z.object({
|
||||
memory: z.string().describe(PARAMETER_DESCRIPTIONS.memory),
|
||||
}),
|
||||
execute: async ({ memory }) => {
|
||||
try {
|
||||
const metadata: Record<string, string | number | boolean> = {}
|
||||
|
||||
const response = await client.memories.add({
|
||||
content: memory,
|
||||
containerTags,
|
||||
...(Object.keys(metadata).length > 0 && { metadata }),
|
||||
})
|
||||
|
||||
return {
|
||||
success: true,
|
||||
memory: response,
|
||||
}
|
||||
} catch (error) {
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : "Unknown error",
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Create Supermemory tools for AI SDK
|
||||
*/
|
||||
export function supermemoryTools(
|
||||
apiKey: string,
|
||||
config?: SupermemoryToolsConfig,
|
||||
) {
|
||||
return {
|
||||
searchMemories: searchMemoriesTool(apiKey, config),
|
||||
addMemory: addMemoryTool(apiKey, config),
|
||||
}
|
||||
}
|
||||
2
packages/tools/src/index.ts
Normal file
2
packages/tools/src/index.ts
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
// Export shared types and utilities
|
||||
export type { SupermemoryToolsConfig } from "./types"
|
||||
276
packages/tools/src/openai.ts
Normal file
276
packages/tools/src/openai.ts
Normal file
|
|
@ -0,0 +1,276 @@
|
|||
import type OpenAI from "openai"
|
||||
import Supermemory from "supermemory"
|
||||
import {
|
||||
DEFAULT_VALUES,
|
||||
PARAMETER_DESCRIPTIONS,
|
||||
TOOL_DESCRIPTIONS,
|
||||
getContainerTags,
|
||||
} from "./shared"
|
||||
import type { SupermemoryToolsConfig } from "./types"
|
||||
|
||||
/**
 * Result types for memory operations
 */
export interface MemorySearchResult {
	// Whether the search call completed without throwing.
	success: boolean
	// Raw search hits from the Supermemory SDK; present on success.
	results?: Awaited<ReturnType<Supermemory["search"]["execute"]>>["results"]
	// Number of hits returned; present on success.
	count?: number
	// Error message; present on failure.
	error?: string
}

export interface MemoryAddResult {
	// Whether the add call completed without throwing.
	success: boolean
	// The created memory record from the Supermemory SDK; present on success.
	memory?: Awaited<ReturnType<Supermemory["memories"]["add"]>>
	// Error message; present on failure.
	error?: string
}
|
||||
|
||||
/**
 * Function schemas for OpenAI function calling
 */
export const memoryToolSchemas = {
	searchMemories: {
		name: "searchMemories",
		description: TOOL_DESCRIPTIONS.searchMemories,
		// JSON-Schema parameter description consumed by the OpenAI API.
		parameters: {
			type: "object",
			properties: {
				informationToGet: {
					type: "string",
					description: PARAMETER_DESCRIPTIONS.informationToGet,
				},
				includeFullDocs: {
					type: "boolean",
					description: PARAMETER_DESCRIPTIONS.includeFullDocs,
					default: DEFAULT_VALUES.includeFullDocs,
				},
				limit: {
					type: "number",
					description: PARAMETER_DESCRIPTIONS.limit,
					default: DEFAULT_VALUES.limit,
				},
			},
			required: ["informationToGet"],
		},
		// `satisfies` validates against the OpenAI SDK type while keeping the
		// literal key/name types for callers.
	} satisfies OpenAI.FunctionDefinition,

	addMemory: {
		name: "addMemory",
		description: TOOL_DESCRIPTIONS.addMemory,
		parameters: {
			type: "object",
			properties: {
				memory: {
					type: "string",
					description: PARAMETER_DESCRIPTIONS.memory,
				},
			},
			required: ["memory"],
		},
	} satisfies OpenAI.FunctionDefinition,
} as const
|
||||
|
||||
/**
|
||||
* Create a Supermemory client with configuration
|
||||
*/
|
||||
function createClient(apiKey: string, config?: SupermemoryToolsConfig) {
|
||||
const client = new Supermemory({
|
||||
apiKey,
|
||||
...(config?.baseUrl && { baseURL: config.baseUrl }),
|
||||
})
|
||||
|
||||
const containerTags = getContainerTags(config)
|
||||
|
||||
return { client, containerTags }
|
||||
}
|
||||
|
||||
/**
|
||||
* Search memories function
|
||||
*/
|
||||
export function createSearchMemoriesFunction(
|
||||
apiKey: string,
|
||||
config?: SupermemoryToolsConfig,
|
||||
) {
|
||||
const { client, containerTags } = createClient(apiKey, config)
|
||||
|
||||
return async function searchMemories({
|
||||
informationToGet,
|
||||
includeFullDocs = DEFAULT_VALUES.includeFullDocs,
|
||||
limit = DEFAULT_VALUES.limit,
|
||||
}: {
|
||||
informationToGet: string
|
||||
includeFullDocs?: boolean
|
||||
limit?: number
|
||||
}): Promise<MemorySearchResult> {
|
||||
try {
|
||||
const response = await client.search.execute({
|
||||
q: informationToGet,
|
||||
containerTags,
|
||||
limit,
|
||||
chunkThreshold: DEFAULT_VALUES.chunkThreshold,
|
||||
includeFullDocs,
|
||||
})
|
||||
|
||||
return {
|
||||
success: true,
|
||||
results: response.results,
|
||||
count: response.results?.length || 0,
|
||||
}
|
||||
} catch (error) {
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : "Unknown error",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Add memory function
|
||||
*/
|
||||
export function createAddMemoryFunction(
|
||||
apiKey: string,
|
||||
config?: SupermemoryToolsConfig,
|
||||
) {
|
||||
const { client, containerTags } = createClient(apiKey, config)
|
||||
|
||||
return async function addMemory({
|
||||
memory,
|
||||
}: {
|
||||
memory: string
|
||||
}): Promise<MemoryAddResult> {
|
||||
try {
|
||||
const metadata: Record<string, string | number | boolean> = {}
|
||||
|
||||
const response = await client.memories.add({
|
||||
content: memory,
|
||||
containerTags,
|
||||
...(Object.keys(metadata).length > 0 && { metadata }),
|
||||
})
|
||||
|
||||
return {
|
||||
success: true,
|
||||
memory: response,
|
||||
}
|
||||
} catch (error) {
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : "Unknown error",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create all memory tools functions
|
||||
*/
|
||||
export function supermemoryTools(
|
||||
apiKey: string,
|
||||
config?: SupermemoryToolsConfig,
|
||||
) {
|
||||
const searchMemories = createSearchMemoriesFunction(apiKey, config)
|
||||
const addMemory = createAddMemoryFunction(apiKey, config)
|
||||
|
||||
return {
|
||||
searchMemories,
|
||||
addMemory,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get OpenAI function definitions for all memory tools
|
||||
*/
|
||||
export function getToolDefinitions(): OpenAI.Chat.Completions.ChatCompletionTool[] {
|
||||
return [
|
||||
{ type: "function", function: memoryToolSchemas.searchMemories },
|
||||
{ type: "function", function: memoryToolSchemas.addMemory },
|
||||
]
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a tool call based on the function name and arguments
|
||||
*/
|
||||
export function createToolCallExecutor(
|
||||
apiKey: string,
|
||||
config?: SupermemoryToolsConfig,
|
||||
) {
|
||||
const tools = supermemoryTools(apiKey, config)
|
||||
|
||||
return async function executeToolCall(
|
||||
toolCall: OpenAI.Chat.Completions.ChatCompletionMessageToolCall,
|
||||
): Promise<string> {
|
||||
const functionName = toolCall.function.name
|
||||
const args = JSON.parse(toolCall.function.arguments)
|
||||
|
||||
switch (functionName) {
|
||||
case "searchMemories":
|
||||
return JSON.stringify(await tools.searchMemories(args))
|
||||
case "addMemory":
|
||||
return JSON.stringify(await tools.addMemory(args))
|
||||
default:
|
||||
return JSON.stringify({
|
||||
success: false,
|
||||
error: `Unknown function: ${functionName}`,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute tool calls from OpenAI function calling
|
||||
*/
|
||||
export function createToolCallsExecutor(
|
||||
apiKey: string,
|
||||
config?: SupermemoryToolsConfig,
|
||||
) {
|
||||
const executeToolCall = createToolCallExecutor(apiKey, config)
|
||||
|
||||
return async function executeToolCalls(
|
||||
toolCalls: OpenAI.Chat.Completions.ChatCompletionMessageToolCall[],
|
||||
): Promise<OpenAI.Chat.Completions.ChatCompletionToolMessageParam[]> {
|
||||
const results = await Promise.all(
|
||||
toolCalls.map(async (toolCall) => {
|
||||
const result = await executeToolCall(toolCall)
|
||||
return {
|
||||
tool_call_id: toolCall.id,
|
||||
role: "tool" as const,
|
||||
content: result,
|
||||
}
|
||||
}),
|
||||
)
|
||||
|
||||
return results
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Individual tool creators for more granular control
|
||||
*/
|
||||
export function createSearchMemoriesTool(
|
||||
apiKey: string,
|
||||
config?: SupermemoryToolsConfig,
|
||||
) {
|
||||
const searchMemories = createSearchMemoriesFunction(apiKey, config)
|
||||
|
||||
return {
|
||||
definition: {
|
||||
type: "function" as const,
|
||||
function: memoryToolSchemas.searchMemories,
|
||||
},
|
||||
execute: searchMemories,
|
||||
}
|
||||
}
|
||||
|
||||
export function createAddMemoryTool(
|
||||
apiKey: string,
|
||||
config?: SupermemoryToolsConfig,
|
||||
) {
|
||||
const addMemory = createAddMemoryFunction(apiKey, config)
|
||||
|
||||
return {
|
||||
definition: {
|
||||
type: "function" as const,
|
||||
function: memoryToolSchemas.addMemory,
|
||||
},
|
||||
execute: addMemory,
|
||||
}
|
||||
}
|
||||
47
packages/tools/src/shared.ts
Normal file
47
packages/tools/src/shared.ts
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
/**
 * Shared constants and descriptions for Supermemory tools
 */

// Tool descriptions
// Shared between the AI SDK tools and the OpenAI function schemas so the
// two submodules present identical behavior to the model.
export const TOOL_DESCRIPTIONS = {
	searchMemories:
		"Search (recall) memories/details/information about the user or other facts or entities. Run when explicitly asked or when context about user's past choices would be helpful.",
	addMemory:
		"Add (remember) memories/details/information about the user or other facts or entities. Run when explicitly asked or when the user mentions any information generalizable beyond the context of the current conversation.",
} as const

// Parameter descriptions
export const PARAMETER_DESCRIPTIONS = {
	informationToGet: "Terms to search for in the user's memories",
	includeFullDocs:
		"Whether to include the full document content in the response. Defaults to true for better AI context.",
	limit: "Maximum number of results to return",
	memory:
		"The text content of the memory to add. This should be a single sentence or a short paragraph.",
} as const

// Default values
export const DEFAULT_VALUES = {
	includeFullDocs: true,
	limit: 10,
	// Similarity threshold passed as `chunkThreshold` to search.execute.
	chunkThreshold: 0.6,
} as const

// Container tag constants
export const CONTAINER_TAG_CONSTANTS = {
	// Prefix applied to `projectId` when converting it to a container tag.
	projectPrefix: "sm_project_",
	// Fallback tags used when neither projectId nor containerTags is given.
	defaultTags: ["sm_project_default"] as string[],
} as const
|
||||
|
||||
/**
|
||||
* Helper function to generate container tags based on config
|
||||
*/
|
||||
export function getContainerTags(config?: {
|
||||
projectId?: string
|
||||
containerTags?: string[]
|
||||
}): string[] {
|
||||
if (config?.projectId) {
|
||||
return [`${CONTAINER_TAG_CONSTANTS.projectPrefix}${config.projectId}`]
|
||||
}
|
||||
return config?.containerTags ?? CONTAINER_TAG_CONSTANTS.defaultTags
|
||||
}
|
||||
274
packages/tools/src/tools.test.ts
Normal file
274
packages/tools/src/tools.test.ts
Normal file
|
|
@ -0,0 +1,274 @@
|
|||
// Integration tests for both submodules. These hit the real Supermemory and
// OpenAI APIs, so SUPERMEMORY_API_KEY and OPENAI_API_KEY must be set
// (loaded from .env via dotenv below).
import { createOpenAI } from "@ai-sdk/openai"
import { generateText } from "ai"
import { describe, expect, it } from "vitest"
import * as aiSdk from "./ai-sdk"
import * as openAi from "./openai"
import type { SupermemoryToolsConfig } from "./types"

import "dotenv/config"

describe("@supermemory/tools", () => {
	// Required API keys - tests will fail if not provided
	const testApiKey = process.env.SUPERMEMORY_API_KEY
	const testOpenAIKey = process.env.OPENAI_API_KEY

	if (!testApiKey) {
		throw new Error(
			"SUPERMEMORY_API_KEY environment variable is required for tests",
		)
	}
	if (!testOpenAIKey) {
		throw new Error("OPENAI_API_KEY environment variable is required for tests")
	}

	// Optional configuration with defaults
	const testBaseUrl = process.env.SUPERMEMORY_BASE_URL ?? undefined
	const testModelName = process.env.MODEL_NAME || "gpt-4o-mini"

	describe("aiSdk module", () => {
		describe("client initialization", () => {
			it("should create tools with default configuration", () => {
				const config: SupermemoryToolsConfig = {}
				const tools = aiSdk.supermemoryTools(testApiKey, config)

				expect(tools).toBeDefined()
				expect(tools.searchMemories).toBeDefined()
				expect(tools.addMemory).toBeDefined()
			})

			it("should create tools with custom baseUrl", () => {
				const config: SupermemoryToolsConfig = {
					baseUrl: testBaseUrl,
				}
				const tools = aiSdk.supermemoryTools(testApiKey, config)

				expect(tools).toBeDefined()
				expect(tools.searchMemories).toBeDefined()
				expect(tools.addMemory).toBeDefined()
			})

			it("should create individual tools", () => {
				const searchTool = aiSdk.searchMemoriesTool(testApiKey, {
					projectId: "test-project-123",
				})
				const addTool = aiSdk.addMemoryTool(testApiKey, {
					projectId: "test-project-123",
				})

				expect(searchTool).toBeDefined()
				expect(addTool).toBeDefined()
			})
		})

		describe("AI SDK integration", () => {
			// Live round-trip through generateText with the memory tools attached.
			it("should work with AI SDK generateText", async () => {
				const openai = createOpenAI({
					apiKey: testOpenAIKey,
				})

				const result = await generateText({
					model: openai(testModelName),
					messages: [
						{
							role: "system",
							content:
								"You are a helpful assistant with access to user memories. Use the search tool when the user asks about preferences or past information.",
						},
						{
							role: "user",
							content: "What do you remember about my preferences?",
						},
					],
					tools: {
						...aiSdk.supermemoryTools(testApiKey, {
							projectId: "test-ai-integration",
							baseUrl: testBaseUrl,
						}),
					},
				})

				expect(result).toBeDefined()
				expect(result.text).toBeDefined()
				expect(typeof result.text).toBe("string")
			})

			it("should use tools when prompted", async () => {
				const openai = createOpenAI({
					apiKey: testOpenAIKey,
				})

				const tools = aiSdk.supermemoryTools(testApiKey, {
					projectId: "test-tool-usage",
					baseUrl: testBaseUrl,
				})

				const result = await generateText({
					model: openai(testModelName),
					messages: [
						{
							role: "system",
							content:
								"You are a helpful assistant. When the user asks you to remember something, use the addMemory tool.",
						},
						{
							role: "user",
							content: "Please remember that I prefer dark roast coffee",
						},
					],
					tools: {
						addMemory: tools.addMemory,
					},
				})

				expect(result).toBeDefined()
				expect(result.text).toBeDefined()
			})
		})
	})

	describe("openAi module", () => {
		describe("function-based tools", () => {
			it("should create function-based tools", () => {
				const tools = openAi.supermemoryTools(testApiKey, {
					projectId: "test-openai-functions",
				})

				expect(tools).toBeDefined()
				expect(tools.searchMemories).toBeDefined()
				expect(tools.addMemory).toBeDefined()
			})

			it("should create individual tool functions", () => {
				const searchFunction = openAi.createSearchMemoriesFunction(testApiKey, {
					projectId: "test-individual",
				})
				const addFunction = openAi.createAddMemoryFunction(testApiKey, {
					projectId: "test-individual",
				})

				expect(searchFunction).toBeDefined()
				expect(addFunction).toBeDefined()
				expect(typeof searchFunction).toBe("function")
				expect(typeof addFunction).toBe("function")
			})
		})

		describe("tool definitions", () => {
			it("should return proper OpenAI function definitions", () => {
				const definitions = openAi.getToolDefinitions()

				expect(definitions).toBeDefined()
				expect(definitions.length).toBe(2)

				// Check searchMemories
				const searchTool = definitions.find(
					(d) => d.function.name === "searchMemories",
				)
				expect(searchTool).toBeDefined()
				expect(searchTool!.type).toBe("function")
				expect(searchTool!.function.parameters?.required).toContain(
					"informationToGet",
				)

				// Check addMemory
				const addTool = definitions.find((d) => d.function.name === "addMemory")
				expect(addTool).toBeDefined()
				expect(addTool!.type).toBe("function")
				expect(addTool!.function.parameters?.required).toContain("memory")
			})
		})

		describe("tool execution", () => {
			it("should create tool call executor", () => {
				const executor = openAi.createToolCallExecutor(testApiKey, {
					containerTags: ["test-executor"],
					baseUrl: testBaseUrl,
				})

				expect(executor).toBeDefined()
				expect(typeof executor).toBe("function")
			})

			it("should create tool calls executor", () => {
				const executor = openAi.createToolCallsExecutor(testApiKey, {
					containerTags: ["test-executors"],
					baseUrl: testBaseUrl,
				})

				expect(executor).toBeDefined()
				expect(typeof executor).toBe("function")
			})
		})

		describe("individual tool creators", () => {
			it("should create individual search tool", () => {
				const searchTool = openAi.createSearchMemoriesTool(testApiKey, {
					projectId: "test-individual",
				})

				expect(searchTool).toBeDefined()
				expect(searchTool.definition).toBeDefined()
				expect(searchTool.execute).toBeDefined()
				expect(searchTool.definition.function.name).toBe("searchMemories")
			})

			it("should create individual add tool", () => {
				const addTool = openAi.createAddMemoryTool(testApiKey, {
					projectId: "test-individual",
				})

				expect(addTool).toBeDefined()
				expect(addTool.definition).toBeDefined()
				expect(addTool.execute).toBeDefined()
				expect(addTool.definition.function.name).toBe("addMemory")
			})
		})

		describe("memory operations", () => {
			// These two cases exercise the live API; they accept either outcome
			// shape (success or error) and only assert the result contract.
			it("should search memories", async () => {
				const searchFunction = openAi.createSearchMemoriesFunction(testApiKey, {
					projectId: "test-search",
					baseUrl: testBaseUrl,
				})

				const result = await searchFunction({
					informationToGet: "test preferences",
					limit: 5,
				})

				expect(result).toBeDefined()
				expect(result.success).toBeDefined()
				expect(typeof result.success).toBe("boolean")

				if (result.success) {
					expect(result.results).toBeDefined()
					expect(result.count).toBeDefined()
					expect(typeof result.count).toBe("number")
				} else {
					expect(result.error).toBeDefined()
				}
			})

			it("should add memory", async () => {
				const addFunction = openAi.createAddMemoryFunction(testApiKey, {
					containerTags: ["test-add-memory"],
					baseUrl: testBaseUrl,
				})

				const result = await addFunction({
					memory: "User prefers dark roast coffee in the morning - test memory",
				})

				expect(result).toBeDefined()
				expect(result.success).toBeDefined()
				expect(typeof result.success).toBe("boolean")

				if (result.success) {
					expect(result.memory).toBeDefined()
				} else {
					expect(result.error).toBeDefined()
				}
			})
		})
	})
})
|
||||
9
packages/tools/src/types.ts
Normal file
9
packages/tools/src/types.ts
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
/**
 * Supermemory configuration
 * Only one of `projectId` or `containerTags` can be provided.
 */
export interface SupermemoryToolsConfig {
	// Custom base URL for the Supermemory API; SDK default is used when omitted.
	baseUrl?: string
	// Explicit container tags to scope memories; mutually exclusive with projectId.
	containerTags?: string[]
	// Project ID, converted to a "sm_project_<id>" container tag; mutually
	// exclusive with containerTags.
	projectId?: string
}
|
||||
|
|
@ -1,15 +1,15 @@
|
|||
import { defineConfig } from "tsdown"
|
||||
|
||||
export default defineConfig({
|
||||
entry: ["src/index.ts"],
|
||||
entry: ["src/index.ts", "src/ai-sdk.ts", "src/openai.ts"],
|
||||
format: "esm",
|
||||
sourcemap: true,
|
||||
sourcemap: false,
|
||||
target: "es2020",
|
||||
tsconfig: "./tsconfig.json",
|
||||
clean: true,
|
||||
minify: true,
|
||||
dts: {
|
||||
sourcemap: true,
|
||||
sourcemap: false,
|
||||
},
|
||||
exports: true,
|
||||
})
|
||||
Loading…
Add table
Add a link
Reference in a new issue