diff --git a/bun.lock b/bun.lock index 1755d23..3d9d14e 100644 --- a/bun.lock +++ b/bun.lock @@ -9,7 +9,7 @@ "fumadocs-mdx": "14.2.7", "fumadocs-ui": "16.6.0", "lucide-react": "^0.563.0", - "next": "16.1.6", + "next": "16.2.3", "posthog-js": "^1.354.0", "posthog-node": "^5.26.0", "react": "^19.2.4", @@ -187,23 +187,23 @@ "@mdx-js/mdx": ["@mdx-js/mdx@3.1.1", "", { "dependencies": { "@types/estree": "1.0.8", "@types/estree-jsx": "1.0.5", "@types/hast": "3.0.4", "@types/mdx": "2.0.13", "acorn": "8.15.0", "collapse-white-space": "2.1.0", "devlop": "1.1.0", "estree-util-is-identifier-name": "3.0.0", "estree-util-scope": "1.0.0", "estree-walker": "3.0.3", "hast-util-to-jsx-runtime": "2.3.6", "markdown-extensions": "2.0.0", "recma-build-jsx": "1.0.0", "recma-jsx": "1.0.1", "recma-stringify": "1.0.0", "rehype-recma": "1.0.0", "remark-mdx": "3.1.1", "remark-parse": "11.0.0", "remark-rehype": "11.1.2", "source-map": "0.7.6", "unified": "11.0.5", "unist-util-position-from-estree": "2.0.0", "unist-util-stringify-position": "4.0.0", "unist-util-visit": "5.1.0", "vfile": "6.0.3" } }, "sha512-f6ZO2ifpwAQIpzGWaBQT2TXxPv6z3RBzQKpVftEWN78Vl/YweF1uwussDx8ECAXVtr3Rs89fKyG9YlzUs9DyGQ=="], - "@next/env": ["@next/env@16.1.6", "", {}, "sha512-N1ySLuZjnAtN3kFnwhAwPvZah8RJxKasD7x1f8shFqhncnWZn4JMfg37diLNuoHsLAlrDfM3g4mawVdtAG8XLQ=="], + "@next/env": ["@next/env@16.2.3", "", {}, "sha512-ZWXyj4uNu4GCWQw9cjRxWlbD+33mcDszIo9iQxFnBX3Wmgq9ulaSJcl6VhuWx5pCWqqD+9W6Wfz7N0lM5lYPMA=="], - "@next/swc-darwin-arm64": ["@next/swc-darwin-arm64@16.1.6", "", { "os": "darwin", "cpu": "arm64" }, "sha512-wTzYulosJr/6nFnqGW7FrG3jfUUlEf8UjGA0/pyypJl42ExdVgC6xJgcXQ+V8QFn6niSG2Pb8+MIG1mZr2vczw=="], + "@next/swc-darwin-arm64": ["@next/swc-darwin-arm64@16.2.3", "", { "os": "darwin", "cpu": "arm64" }, "sha512-u37KDKTKQ+OQLvY+z7SNXixwo4Q2/IAJFDzU1fYe66IbCE51aDSAzkNDkWmLN0yjTUh4BKBd+hb69jYn6qqqSg=="], - "@next/swc-darwin-x64": ["@next/swc-darwin-x64@16.1.6", "", { "os": "darwin", "cpu": "x64" }, 
"sha512-BLFPYPDO+MNJsiDWbeVzqvYd4NyuRrEYVB5k2N3JfWncuHAy2IVwMAOlVQDFjj+krkWzhY2apvmekMkfQR0CUQ=="], + "@next/swc-darwin-x64": ["@next/swc-darwin-x64@16.2.3", "", { "os": "darwin", "cpu": "x64" }, "sha512-gHjL/qy6Q6CG3176FWbAKyKh9IfntKZTB3RY/YOJdDFpHGsUDXVH38U4mMNpHVGXmeYW4wj22dMp1lTfmu/bTQ=="], - "@next/swc-linux-arm64-gnu": ["@next/swc-linux-arm64-gnu@16.1.6", "", { "os": "linux", "cpu": "arm64" }, "sha512-OJYkCd5pj/QloBvoEcJ2XiMnlJkRv9idWA/j0ugSuA34gMT6f5b7vOiCQHVRpvStoZUknhl6/UxOXL4OwtdaBw=="], + "@next/swc-linux-arm64-gnu": ["@next/swc-linux-arm64-gnu@16.2.3", "", { "os": "linux", "cpu": "arm64" }, "sha512-U6vtblPtU/P14Y/b/n9ZY0GOxbbIhTFuaFR7F4/uMBidCi2nSdaOFhA0Go81L61Zd6527+yvuX44T4ksnf8T+Q=="], - "@next/swc-linux-arm64-musl": ["@next/swc-linux-arm64-musl@16.1.6", "", { "os": "linux", "cpu": "arm64" }, "sha512-S4J2v+8tT3NIO9u2q+S0G5KdvNDjXfAv06OhfOzNDaBn5rw84DGXWndOEB7d5/x852A20sW1M56vhC/tRVbccQ=="], + "@next/swc-linux-arm64-musl": ["@next/swc-linux-arm64-musl@16.2.3", "", { "os": "linux", "cpu": "arm64" }, "sha512-/YV0LgjHUmfhQpn9bVoGc4x4nan64pkhWR5wyEV8yCOfwwrH630KpvRg86olQHTwHIn1z59uh6JwKvHq1h4QEw=="], - "@next/swc-linux-x64-gnu": ["@next/swc-linux-x64-gnu@16.1.6", "", { "os": "linux", "cpu": "x64" }, "sha512-2eEBDkFlMMNQnkTyPBhQOAyn2qMxyG2eE7GPH2WIDGEpEILcBPI/jdSv4t6xupSP+ot/jkfrCShLAa7+ZUPcJQ=="], + "@next/swc-linux-x64-gnu": ["@next/swc-linux-x64-gnu@16.2.3", "", { "os": "linux", "cpu": "x64" }, "sha512-/HiWEcp+WMZ7VajuiMEFGZ6cg0+aYZPqCJD3YJEfpVWQsKYSjXQG06vJP6F1rdA03COD9Fef4aODs3YxKx+RDQ=="], - "@next/swc-linux-x64-musl": ["@next/swc-linux-x64-musl@16.1.6", "", { "os": "linux", "cpu": "x64" }, "sha512-oicJwRlyOoZXVlxmIMaTq7f8pN9QNbdes0q2FXfRsPhfCi8n8JmOZJm5oo1pwDaFbnnD421rVU409M3evFbIqg=="], + "@next/swc-linux-x64-musl": ["@next/swc-linux-x64-musl@16.2.3", "", { "os": "linux", "cpu": "x64" }, "sha512-Kt44hGJfZSefebhk/7nIdivoDr3Ugp5+oNz9VvF3GUtfxutucUIHfIO0ZYO8QlOPDQloUVQn4NVC/9JvHRk9hw=="], - "@next/swc-win32-arm64-msvc": 
["@next/swc-win32-arm64-msvc@16.1.6", "", { "os": "win32", "cpu": "arm64" }, "sha512-gQmm8izDTPgs+DCWH22kcDmuUp7NyiJgEl18bcr8irXA5N2m2O+JQIr6f3ct42GOs9c0h8QF3L5SzIxcYAAXXw=="], + "@next/swc-win32-arm64-msvc": ["@next/swc-win32-arm64-msvc@16.2.3", "", { "os": "win32", "cpu": "arm64" }, "sha512-O2NZ9ie3Tq6xj5Z5CSwBT3+aWAMW2PIZ4egUi9MaWLkwaehgtB7YZjPm+UpcNpKOme0IQuqDcor7BsW6QBiQBw=="], - "@next/swc-win32-x64-msvc": ["@next/swc-win32-x64-msvc@16.1.6", "", { "os": "win32", "cpu": "x64" }, "sha512-NRfO39AIrzBnixKbjuo2YiYhB6o9d8v/ymU9m/Xk8cyVk+k7XylniXkHwjs4s70wedVffc6bQNbufk5v0xEm0A=="], + "@next/swc-win32-x64-msvc": ["@next/swc-win32-x64-msvc@16.2.3", "", { "os": "win32", "cpu": "x64" }, "sha512-Ibm29/GgB/ab5n7XKqlStkm54qqZE8v2FnijUPBgrd67FWrac45o/RsNlaOWjme/B5UqeWt/8KM4aWBwA1D2Kw=="], "@opentelemetry/api": ["@opentelemetry/api@1.9.0", "", {}, "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg=="], @@ -719,7 +719,7 @@ "negotiator": ["negotiator@1.0.0", "", {}, "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg=="], - "next": ["next@16.1.6", "", { "dependencies": { "@next/env": "16.1.6", "@swc/helpers": "0.5.15", "baseline-browser-mapping": "2.9.19", "caniuse-lite": "1.0.30001769", "postcss": "8.4.31", "styled-jsx": "5.1.6" }, "optionalDependencies": { "@next/swc-darwin-arm64": "16.1.6", "@next/swc-darwin-x64": "16.1.6", "@next/swc-linux-arm64-gnu": "16.1.6", "@next/swc-linux-arm64-musl": "16.1.6", "@next/swc-linux-x64-gnu": "16.1.6", "@next/swc-linux-x64-musl": "16.1.6", "@next/swc-win32-arm64-msvc": "16.1.6", "@next/swc-win32-x64-msvc": "16.1.6", "@opentelemetry/api": "1.9.0", "sharp": "0.34.5" }, "peerDependencies": { "react": "19.2.4", "react-dom": "19.2.4" }, "bin": { "next": "dist/bin/next" } }, "sha512-hkyRkcu5x/41KoqnROkfTm2pZVbKxvbZRuNvKXLRXxs3VfyO0WhY50TQS40EuKO9SW3rBj/sF3WbVwDACeMZyw=="], + "next": ["next@16.2.3", "", { "dependencies": { "@next/env": "16.2.3", 
"@swc/helpers": "0.5.15", "baseline-browser-mapping": "^2.9.19", "caniuse-lite": "^1.0.30001579", "postcss": "8.4.31", "styled-jsx": "5.1.6" }, "optionalDependencies": { "@next/swc-darwin-arm64": "16.2.3", "@next/swc-darwin-x64": "16.2.3", "@next/swc-linux-arm64-gnu": "16.2.3", "@next/swc-linux-arm64-musl": "16.2.3", "@next/swc-linux-x64-gnu": "16.2.3", "@next/swc-linux-x64-musl": "16.2.3", "@next/swc-win32-arm64-msvc": "16.2.3", "@next/swc-win32-x64-msvc": "16.2.3", "sharp": "^0.34.5" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", "@playwright/test": "^1.51.1", "babel-plugin-react-compiler": "*", "react": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", "react-dom": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", "sass": "^1.3.0" }, "optionalPeers": ["@opentelemetry/api", "@playwright/test", "babel-plugin-react-compiler", "sass"], "bin": { "next": "dist/bin/next" } }, "sha512-9V3zV4oZFza3PVev5/poB9g0dEafVcgNyQ8eTRop8GvxZjV2G15FC5ARuG1eFD42QgeYkzJBJzHghNP8Ad9xtA=="], "next-themes": ["next-themes@0.4.6", "", { "peerDependencies": { "react": "19.2.4", "react-dom": "19.2.4" } }, "sha512-pZvgD5L0IEvX5/9GWyHMf3m8BKiVQwsCMHfoFosXtXBMnaS0ZnIJ9ST4b4NqLVKDEm8QBxoNNGNaBv2JNF6XNA=="], @@ -921,10 +921,76 @@ "@radix-ui/react-primitive/@radix-ui/react-slot": ["@radix-ui/react-slot@1.2.3", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2" }, "optionalDependencies": { "@types/react": "19.2.14" }, "peerDependencies": { "react": "19.2.4" } }, "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A=="], + "fumadocs-core/next": ["next@16.1.6", "", { "dependencies": { "@next/env": "16.1.6", "@swc/helpers": "0.5.15", "baseline-browser-mapping": "2.9.19", "caniuse-lite": "1.0.30001769", "postcss": "8.4.31", "styled-jsx": "5.1.6" }, "optionalDependencies": { "@next/swc-darwin-arm64": "16.1.6", "@next/swc-darwin-x64": "16.1.6", "@next/swc-linux-arm64-gnu": "16.1.6", "@next/swc-linux-arm64-musl": "16.1.6", 
"@next/swc-linux-x64-gnu": "16.1.6", "@next/swc-linux-x64-musl": "16.1.6", "@next/swc-win32-arm64-msvc": "16.1.6", "@next/swc-win32-x64-msvc": "16.1.6", "@opentelemetry/api": "1.9.0", "sharp": "0.34.5" }, "peerDependencies": { "react": "19.2.4", "react-dom": "19.2.4" }, "bin": { "next": "dist/bin/next" } }, "sha512-hkyRkcu5x/41KoqnROkfTm2pZVbKxvbZRuNvKXLRXxs3VfyO0WhY50TQS40EuKO9SW3rBj/sF3WbVwDACeMZyw=="], + + "fumadocs-mdx/next": ["next@16.1.6", "", { "dependencies": { "@next/env": "16.1.6", "@swc/helpers": "0.5.15", "baseline-browser-mapping": "2.9.19", "caniuse-lite": "1.0.30001769", "postcss": "8.4.31", "styled-jsx": "5.1.6" }, "optionalDependencies": { "@next/swc-darwin-arm64": "16.1.6", "@next/swc-darwin-x64": "16.1.6", "@next/swc-linux-arm64-gnu": "16.1.6", "@next/swc-linux-arm64-musl": "16.1.6", "@next/swc-linux-x64-gnu": "16.1.6", "@next/swc-linux-x64-musl": "16.1.6", "@next/swc-win32-arm64-msvc": "16.1.6", "@next/swc-win32-x64-msvc": "16.1.6", "@opentelemetry/api": "1.9.0", "sharp": "0.34.5" }, "peerDependencies": { "react": "19.2.4", "react-dom": "19.2.4" }, "bin": { "next": "dist/bin/next" } }, "sha512-hkyRkcu5x/41KoqnROkfTm2pZVbKxvbZRuNvKXLRXxs3VfyO0WhY50TQS40EuKO9SW3rBj/sF3WbVwDACeMZyw=="], + + "fumadocs-ui/next": ["next@16.1.6", "", { "dependencies": { "@next/env": "16.1.6", "@swc/helpers": "0.5.15", "baseline-browser-mapping": "2.9.19", "caniuse-lite": "1.0.30001769", "postcss": "8.4.31", "styled-jsx": "5.1.6" }, "optionalDependencies": { "@next/swc-darwin-arm64": "16.1.6", "@next/swc-darwin-x64": "16.1.6", "@next/swc-linux-arm64-gnu": "16.1.6", "@next/swc-linux-arm64-musl": "16.1.6", "@next/swc-linux-x64-gnu": "16.1.6", "@next/swc-linux-x64-musl": "16.1.6", "@next/swc-win32-arm64-msvc": "16.1.6", "@next/swc-win32-x64-msvc": "16.1.6", "@opentelemetry/api": "1.9.0", "sharp": "0.34.5" }, "peerDependencies": { "react": "19.2.4", "react-dom": "19.2.4" }, "bin": { "next": "dist/bin/next" } }, 
"sha512-hkyRkcu5x/41KoqnROkfTm2pZVbKxvbZRuNvKXLRXxs3VfyO0WhY50TQS40EuKO9SW3rBj/sF3WbVwDACeMZyw=="], + "next/postcss": ["postcss@8.4.31", "", { "dependencies": { "nanoid": "3.3.11", "picocolors": "1.1.1", "source-map-js": "1.2.1" } }, "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ=="], "parse-entities/@types/unist": ["@types/unist@2.0.11", "", {}, "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA=="], "parse5/entities": ["entities@6.0.1", "", {}, "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g=="], + + "fumadocs-core/next/@next/env": ["@next/env@16.1.6", "", {}, "sha512-N1ySLuZjnAtN3kFnwhAwPvZah8RJxKasD7x1f8shFqhncnWZn4JMfg37diLNuoHsLAlrDfM3g4mawVdtAG8XLQ=="], + + "fumadocs-core/next/@next/swc-darwin-arm64": ["@next/swc-darwin-arm64@16.1.6", "", { "os": "darwin", "cpu": "arm64" }, "sha512-wTzYulosJr/6nFnqGW7FrG3jfUUlEf8UjGA0/pyypJl42ExdVgC6xJgcXQ+V8QFn6niSG2Pb8+MIG1mZr2vczw=="], + + "fumadocs-core/next/@next/swc-darwin-x64": ["@next/swc-darwin-x64@16.1.6", "", { "os": "darwin", "cpu": "x64" }, "sha512-BLFPYPDO+MNJsiDWbeVzqvYd4NyuRrEYVB5k2N3JfWncuHAy2IVwMAOlVQDFjj+krkWzhY2apvmekMkfQR0CUQ=="], + + "fumadocs-core/next/@next/swc-linux-arm64-gnu": ["@next/swc-linux-arm64-gnu@16.1.6", "", { "os": "linux", "cpu": "arm64" }, "sha512-OJYkCd5pj/QloBvoEcJ2XiMnlJkRv9idWA/j0ugSuA34gMT6f5b7vOiCQHVRpvStoZUknhl6/UxOXL4OwtdaBw=="], + + "fumadocs-core/next/@next/swc-linux-arm64-musl": ["@next/swc-linux-arm64-musl@16.1.6", "", { "os": "linux", "cpu": "arm64" }, "sha512-S4J2v+8tT3NIO9u2q+S0G5KdvNDjXfAv06OhfOzNDaBn5rw84DGXWndOEB7d5/x852A20sW1M56vhC/tRVbccQ=="], + + "fumadocs-core/next/@next/swc-linux-x64-gnu": ["@next/swc-linux-x64-gnu@16.1.6", "", { "os": "linux", "cpu": "x64" }, "sha512-2eEBDkFlMMNQnkTyPBhQOAyn2qMxyG2eE7GPH2WIDGEpEILcBPI/jdSv4t6xupSP+ot/jkfrCShLAa7+ZUPcJQ=="], + + "fumadocs-core/next/@next/swc-linux-x64-musl": 
["@next/swc-linux-x64-musl@16.1.6", "", { "os": "linux", "cpu": "x64" }, "sha512-oicJwRlyOoZXVlxmIMaTq7f8pN9QNbdes0q2FXfRsPhfCi8n8JmOZJm5oo1pwDaFbnnD421rVU409M3evFbIqg=="], + + "fumadocs-core/next/@next/swc-win32-arm64-msvc": ["@next/swc-win32-arm64-msvc@16.1.6", "", { "os": "win32", "cpu": "arm64" }, "sha512-gQmm8izDTPgs+DCWH22kcDmuUp7NyiJgEl18bcr8irXA5N2m2O+JQIr6f3ct42GOs9c0h8QF3L5SzIxcYAAXXw=="], + + "fumadocs-core/next/@next/swc-win32-x64-msvc": ["@next/swc-win32-x64-msvc@16.1.6", "", { "os": "win32", "cpu": "x64" }, "sha512-NRfO39AIrzBnixKbjuo2YiYhB6o9d8v/ymU9m/Xk8cyVk+k7XylniXkHwjs4s70wedVffc6bQNbufk5v0xEm0A=="], + + "fumadocs-core/next/postcss": ["postcss@8.4.31", "", { "dependencies": { "nanoid": "3.3.11", "picocolors": "1.1.1", "source-map-js": "1.2.1" } }, "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ=="], + + "fumadocs-mdx/next/@next/env": ["@next/env@16.1.6", "", {}, "sha512-N1ySLuZjnAtN3kFnwhAwPvZah8RJxKasD7x1f8shFqhncnWZn4JMfg37diLNuoHsLAlrDfM3g4mawVdtAG8XLQ=="], + + "fumadocs-mdx/next/@next/swc-darwin-arm64": ["@next/swc-darwin-arm64@16.1.6", "", { "os": "darwin", "cpu": "arm64" }, "sha512-wTzYulosJr/6nFnqGW7FrG3jfUUlEf8UjGA0/pyypJl42ExdVgC6xJgcXQ+V8QFn6niSG2Pb8+MIG1mZr2vczw=="], + + "fumadocs-mdx/next/@next/swc-darwin-x64": ["@next/swc-darwin-x64@16.1.6", "", { "os": "darwin", "cpu": "x64" }, "sha512-BLFPYPDO+MNJsiDWbeVzqvYd4NyuRrEYVB5k2N3JfWncuHAy2IVwMAOlVQDFjj+krkWzhY2apvmekMkfQR0CUQ=="], + + "fumadocs-mdx/next/@next/swc-linux-arm64-gnu": ["@next/swc-linux-arm64-gnu@16.1.6", "", { "os": "linux", "cpu": "arm64" }, "sha512-OJYkCd5pj/QloBvoEcJ2XiMnlJkRv9idWA/j0ugSuA34gMT6f5b7vOiCQHVRpvStoZUknhl6/UxOXL4OwtdaBw=="], + + "fumadocs-mdx/next/@next/swc-linux-arm64-musl": ["@next/swc-linux-arm64-musl@16.1.6", "", { "os": "linux", "cpu": "arm64" }, "sha512-S4J2v+8tT3NIO9u2q+S0G5KdvNDjXfAv06OhfOzNDaBn5rw84DGXWndOEB7d5/x852A20sW1M56vhC/tRVbccQ=="], + + "fumadocs-mdx/next/@next/swc-linux-x64-gnu": 
["@next/swc-linux-x64-gnu@16.1.6", "", { "os": "linux", "cpu": "x64" }, "sha512-2eEBDkFlMMNQnkTyPBhQOAyn2qMxyG2eE7GPH2WIDGEpEILcBPI/jdSv4t6xupSP+ot/jkfrCShLAa7+ZUPcJQ=="], + + "fumadocs-mdx/next/@next/swc-linux-x64-musl": ["@next/swc-linux-x64-musl@16.1.6", "", { "os": "linux", "cpu": "x64" }, "sha512-oicJwRlyOoZXVlxmIMaTq7f8pN9QNbdes0q2FXfRsPhfCi8n8JmOZJm5oo1pwDaFbnnD421rVU409M3evFbIqg=="], + + "fumadocs-mdx/next/@next/swc-win32-arm64-msvc": ["@next/swc-win32-arm64-msvc@16.1.6", "", { "os": "win32", "cpu": "arm64" }, "sha512-gQmm8izDTPgs+DCWH22kcDmuUp7NyiJgEl18bcr8irXA5N2m2O+JQIr6f3ct42GOs9c0h8QF3L5SzIxcYAAXXw=="], + + "fumadocs-mdx/next/@next/swc-win32-x64-msvc": ["@next/swc-win32-x64-msvc@16.1.6", "", { "os": "win32", "cpu": "x64" }, "sha512-NRfO39AIrzBnixKbjuo2YiYhB6o9d8v/ymU9m/Xk8cyVk+k7XylniXkHwjs4s70wedVffc6bQNbufk5v0xEm0A=="], + + "fumadocs-mdx/next/postcss": ["postcss@8.4.31", "", { "dependencies": { "nanoid": "3.3.11", "picocolors": "1.1.1", "source-map-js": "1.2.1" } }, "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ=="], + + "fumadocs-ui/next/@next/env": ["@next/env@16.1.6", "", {}, "sha512-N1ySLuZjnAtN3kFnwhAwPvZah8RJxKasD7x1f8shFqhncnWZn4JMfg37diLNuoHsLAlrDfM3g4mawVdtAG8XLQ=="], + + "fumadocs-ui/next/@next/swc-darwin-arm64": ["@next/swc-darwin-arm64@16.1.6", "", { "os": "darwin", "cpu": "arm64" }, "sha512-wTzYulosJr/6nFnqGW7FrG3jfUUlEf8UjGA0/pyypJl42ExdVgC6xJgcXQ+V8QFn6niSG2Pb8+MIG1mZr2vczw=="], + + "fumadocs-ui/next/@next/swc-darwin-x64": ["@next/swc-darwin-x64@16.1.6", "", { "os": "darwin", "cpu": "x64" }, "sha512-BLFPYPDO+MNJsiDWbeVzqvYd4NyuRrEYVB5k2N3JfWncuHAy2IVwMAOlVQDFjj+krkWzhY2apvmekMkfQR0CUQ=="], + + "fumadocs-ui/next/@next/swc-linux-arm64-gnu": ["@next/swc-linux-arm64-gnu@16.1.6", "", { "os": "linux", "cpu": "arm64" }, "sha512-OJYkCd5pj/QloBvoEcJ2XiMnlJkRv9idWA/j0ugSuA34gMT6f5b7vOiCQHVRpvStoZUknhl6/UxOXL4OwtdaBw=="], + + "fumadocs-ui/next/@next/swc-linux-arm64-musl": 
["@next/swc-linux-arm64-musl@16.1.6", "", { "os": "linux", "cpu": "arm64" }, "sha512-S4J2v+8tT3NIO9u2q+S0G5KdvNDjXfAv06OhfOzNDaBn5rw84DGXWndOEB7d5/x852A20sW1M56vhC/tRVbccQ=="], + + "fumadocs-ui/next/@next/swc-linux-x64-gnu": ["@next/swc-linux-x64-gnu@16.1.6", "", { "os": "linux", "cpu": "x64" }, "sha512-2eEBDkFlMMNQnkTyPBhQOAyn2qMxyG2eE7GPH2WIDGEpEILcBPI/jdSv4t6xupSP+ot/jkfrCShLAa7+ZUPcJQ=="], + + "fumadocs-ui/next/@next/swc-linux-x64-musl": ["@next/swc-linux-x64-musl@16.1.6", "", { "os": "linux", "cpu": "x64" }, "sha512-oicJwRlyOoZXVlxmIMaTq7f8pN9QNbdes0q2FXfRsPhfCi8n8JmOZJm5oo1pwDaFbnnD421rVU409M3evFbIqg=="], + + "fumadocs-ui/next/@next/swc-win32-arm64-msvc": ["@next/swc-win32-arm64-msvc@16.1.6", "", { "os": "win32", "cpu": "arm64" }, "sha512-gQmm8izDTPgs+DCWH22kcDmuUp7NyiJgEl18bcr8irXA5N2m2O+JQIr6f3ct42GOs9c0h8QF3L5SzIxcYAAXXw=="], + + "fumadocs-ui/next/@next/swc-win32-x64-msvc": ["@next/swc-win32-x64-msvc@16.1.6", "", { "os": "win32", "cpu": "x64" }, "sha512-NRfO39AIrzBnixKbjuo2YiYhB6o9d8v/ymU9m/Xk8cyVk+k7XylniXkHwjs4s70wedVffc6bQNbufk5v0xEm0A=="], + + "fumadocs-ui/next/postcss": ["postcss@8.4.31", "", { "dependencies": { "nanoid": "3.3.11", "picocolors": "1.1.1", "source-map-js": "1.2.1" } }, "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ=="], } } diff --git a/content/stack/cipherstash/encryption/bulk-operations.mdx b/content/stack/cipherstash/encryption/bulk-operations.mdx new file mode 100644 index 0000000..75112e9 --- /dev/null +++ b/content/stack/cipherstash/encryption/bulk-operations.mdx @@ -0,0 +1,206 @@ +--- +title: Bulk operations +description: Encrypt and decrypt arrays of raw values in a single ZeroKMS round-trip using bulkEncrypt and bulkDecrypt +--- + +# Bulk operations + +`bulkEncrypt` and `bulkDecrypt` encrypt or decrypt an array of raw values in a single call to ZeroKMS. Every value still gets its own unique key. 
The batch just pays the network round-trip once, regardless of how many items you pass. + +This page covers the raw-value variants. If you want to encrypt whole objects (records with multiple fields), see [Model operations](/stack/cipherstash/encryption/models) instead. + +For full method signatures, see the [`EncryptionClient` API reference](/stack/reference/stack/latest/packages/stack/src/encryption/classes/EncryptionClient). + +## Why bulk matters + +Calling `encrypt` in a loop makes one ZeroKMS request per value. For 100 emails that is 100 round-trips. `bulkEncrypt` collapses those into one. + +The throughput gain is significant for any batch larger than a handful of records. Use bulk operations whenever you are processing more than one value at a time. + +## `bulkEncrypt` + +Pass an array of `{ id, plaintext }` objects. The `id` is your correlation key: it flows through to the output so you can match encrypted results back to your source records. + +```typescript filename="bulk-encrypt.ts" +import { Encryption } from "@cipherstash/stack" +import { encryptedTable, encryptedColumn } from "@cipherstash/stack/schema" + +const users = encryptedTable("users", { + email: encryptedColumn("email").equality().freeTextSearch(), +}) + +const client = await Encryption({ schemas: [users] }) + +const plaintexts = [ + { id: "u1", plaintext: "alice@example.com" }, + { id: "u2", plaintext: "bob@example.com" }, + { id: "u3", plaintext: "charlie@example.com" }, +] + +const result = await client.bulkEncrypt(plaintexts, { + column: users.email, + table: users, +}) + +if (result.failure) { + throw new Error(`Bulk encryption failed: ${result.failure.message}`) +} + +// result.data is an array of { id: string, data: Encrypted } +// The id matches the id you passed in +const encrypted = result.data +``` + +### Input shape + +Each element in the input array takes this shape: + +| Field | Type | Required | Description | +|---|---|---|---| +| `id` | `string` | No | Correlation key 
returned in the output | +| `plaintext` | `string \| number \| boolean \| null` | Yes | The value to encrypt | + +You can omit `id` when you do not need to correlate results (for example, when processing an ordered list where position is the correlation). + +### Mapping results back to records + +When `id` is present, use it to build a lookup map: + +```typescript filename="bulk-encrypt-map.ts" +const encryptedByUserId = Object.fromEntries( + result.data.map((item) => [item.id, item.data]), +) + +// encryptedByUserId["u1"] → Encrypted payload for alice +``` + +## `bulkDecrypt` + +Pass the array produced by `bulkEncrypt`. Results come back in the same order, with per-item success or failure. + +```typescript filename="bulk-decrypt.ts" +const decrypted = await client.bulkDecrypt(encrypted) + +if (decrypted.failure) { + throw new Error(`Bulk decryption failed: ${decrypted.failure.message}`) +} + +for (const item of decrypted.data) { + if ("data" in item) { + console.log(`${item.id}: ${item.data}`) + } else { + console.error(`${item.id} failed: ${item.error}`) + } +} +``` + +### Per-item failure handling + +`bulkDecrypt` returns a top-level `Result` wrapping an array where each element is either a success or a per-item error. The top-level `failure` fires for infrastructure errors (network, auth). Individual decryption failures surface as `{ id, error }` items in the array. + +```typescript filename="bulk-decrypt-errors.ts" +const successful: string[] = [] +const failed: string[] = [] + +for (const item of decrypted.data) { + if ("data" in item) { + successful.push(item.data as string) + } else { + failed.push(item.id) + } +} +``` + +### Ordering guarantee + +`bulkDecrypt` returns items in the same order as the input array. If you do not use `id`, you can rely on index position for correlation. + +## Complete example: bulk insert with UNNEST + +This pattern encrypts an array of values and inserts them into PostgreSQL with a single multi-row statement. 
+ +```typescript filename="bulk-insert.ts" +import { Pool } from "pg" + +const pool = new Pool({ connectionString: process.env.DATABASE_URL }) + +async function insertUsers(emails: string[]) { + const plaintexts = emails.map((email, i) => ({ + id: String(i), + plaintext: email, + })) + + const encryptResult = await client.bulkEncrypt(plaintexts, { + column: users.email, + table: users, + }) + + if (encryptResult.failure) { + throw new Error(`Encryption failed: ${encryptResult.failure.message}`) + } + + const encryptedValues = encryptResult.data.map((item) => item.data) + + const result = await pool.query( + `INSERT INTO users (email) + SELECT * FROM UNNEST($1::jsonb[]) + RETURNING id`, + [encryptedValues], + ) + + return result.rows.map((row) => row.id) +} +``` + + + Always use the `::jsonb` cast when passing encrypted values to PostgreSQL. This ensures PostgreSQL handles the CipherCell JSON payload correctly. + + +For the table setup and single-record insert pattern, see [Storing encrypted data](/stack/cipherstash/encryption/storing-data). + +## Identity-aware bulk encryption + +Lock an entire batch to a user's identity by chaining `.withLockContext()`: + +```typescript filename="bulk-encrypt-identity.ts" +import { LockContext } from "@cipherstash/stack/identity" + +const lc = new LockContext() +const lockContext = (await lc.identify(userJwt)).data! + +const encrypted = await client + .bulkEncrypt(plaintexts, { column: users.email, table: users }) + .withLockContext(lockContext) + +const decrypted = await client + .bulkDecrypt(encrypted.data) + .withLockContext(lockContext) +``` + +See [Identity-aware encryption](/stack/cipherstash/encryption/identity) for the full lock context flow. 
+ +## When to use bulk vs model operations + +| Scenario | Recommended method | +|---|---| +| Encrypting one field from a list of records | `bulkEncrypt` / `bulkDecrypt` | +| Encrypting whole records with multiple encrypted fields | `bulkEncryptModels` / `bulkDecryptModels` | +| Migrating a single column in an existing table | `bulkEncrypt` | +| Inserting new records from a form or API payload | `bulkEncryptModels` | + +The rule of thumb: use raw bulk methods when you are working with a single field across many records. Use model methods when you have whole objects to round-trip. + +See [Model operations](/stack/cipherstash/encryption/models) for `bulkEncryptModels` and `bulkDecryptModels`. + +## ORM integrations + +Drizzle and DynamoDB have adapter-level bulk support that wraps these methods: + +- [Drizzle bulk insert](/stack/cipherstash/encryption/drizzle): `bulkEncryptModels` with Drizzle `.values()` +- [DynamoDB bulk operations](/stack/cipherstash/encryption/dynamodb): `BatchWriteItem` and `BatchGetItem` wrappers + +## Next steps + +- [Model operations](/stack/cipherstash/encryption/models): encrypt whole records in one call +- [Storing encrypted data](/stack/cipherstash/encryption/storing-data): raw SQL insert and retrieve patterns +- [Identity-aware encryption](/stack/cipherstash/encryption/identity): scope encryption to a user's JWT diff --git a/content/stack/cipherstash/encryption/indexes.mdx b/content/stack/cipherstash/encryption/indexes.mdx new file mode 100644 index 0000000..ccaf4c5 --- /dev/null +++ b/content/stack/cipherstash/encryption/indexes.mdx @@ -0,0 +1,148 @@ +--- +title: Setting up indexes +description: Create PostgreSQL indexes for encrypted columns. Index syntax differs between self-hosted PostgreSQL and managed databases like Supabase. +--- + +# Setting up indexes + +Encrypted columns need PostgreSQL indexes for fast queries. Without an index, the database performs a sequential scan: correct but slow at scale. 
+ +Index syntax differs between deployment types. Self-hosted PostgreSQL with full EQL installed supports custom operator classes and can use B-tree indexes directly on `eql_v2_encrypted` columns. Managed databases like Supabase cannot install operator families (they require superuser), so indexes must use extraction functions instead. + +## Deployment matrix + +| Query type | Self-hosted (full EQL) | Supabase | +|---|---|---| +| Equality | `USING btree (col)` with opclass, or `USING hash (eql_v2.hmac_256(col))` | `USING hash (eql_v2.hmac_256(col))` only | +| Range / ORDER BY | `USING btree (col)` with opclass | None (OPE-index work in progress) | +| Pattern match | `USING gin (eql_v2.bloom_filter(col))` | Same | +| JSONB containment | `USING gin (eql_v2.ste_vec(col))` | Same | + + + Range filters (`>`, `>=`, `<`, `<=`) work on Supabase without a range index (they use a sequential scan). `ORDER BY` on encrypted columns is not supported on Supabase at all. Sort application-side after decrypting results. Operator family support for Supabase is in development. + + +--- + +## Equality + +Equality indexes speed up `WHERE col = $1` queries and `IN` lists. + +**Self-hosted (B-tree with operator class):** + +```sql +CREATE INDEX ON users USING btree (email); +``` + +This works because the full EQL install registers a B-tree operator class for `eql_v2_encrypted` that compares HMAC terms. + +**Self-hosted or Supabase (hash on extraction function):** + +```sql +CREATE INDEX ON users USING hash (eql_v2.hmac_256(email)); +``` + +This form works on both deployment types. Use it when you want one index that works everywhere, or when you are on Supabase. + +See queries: [Equality queries](/stack/cipherstash/encryption/queries#equality) + +--- + +## Match + +Match indexes speed up `WHERE col LIKE $1` and `ILIKE` queries. They use a GIN index on the Bloom filter extracted from each encrypted value. 
+ +```sql +CREATE INDEX ON users USING gin (eql_v2.bloom_filter(name)); +``` + +This form is identical for self-hosted and Supabase. + +See queries: [Match queries](/stack/cipherstash/encryption/queries#match-free-text) + +--- + +## Range and order + +Range indexes support `>`, `>=`, `<`, `<=`, `BETWEEN`, and `ORDER BY` on encrypted columns. + +**Self-hosted (B-tree with operator class):** + +```sql +CREATE INDEX ON users USING btree (age); +``` + +Requires the EQL operator family (`CREATE OPERATOR FAMILY`) to be installed. The full EQL install includes this. The `--exclude-operator-family` install flag omits it. + +**Supabase:** + +Functional range indexes for Supabase are not yet available. Range _filters_ work without an index (sequential scan). `ORDER BY` on encrypted columns is not supported on Supabase. + +See queries: [Range queries](/stack/cipherstash/encryption/queries#range-and-ordering) + +--- + +## JSONB + +JSONB indexes support path existence and containment queries on encrypted JSON columns. + +```sql +CREATE INDEX ON documents USING gin (eql_v2.ste_vec(metadata)); +``` + +This form is identical for self-hosted and Supabase. + +See queries: [JSONB queries](/stack/cipherstash/encryption/queries#jsonb-queries) + +--- + +## Supabase query forms + +This is the most common source of silent performance problems with encrypted columns on Supabase. + +A functional index on `eql_v2.hmac_256(email)` is only engaged when the query uses the same extraction function. A bare `WHERE email = $1` query does not use the index, even if the index exists. The database falls back to a sequential scan: your query returns correct results, but it scans every row. 
+ +**Wrong (does not use functional index):** + +```sql +SELECT * FROM users WHERE email = $1::eql_v2_encrypted; +``` + +**Right (engages the functional index):** + +```sql +SELECT * FROM users WHERE eql_v2.hmac_256(email) = eql_v2.hmac_256($1::eql_v2_encrypted); +``` + + + SDK wrappers (Drizzle adapter, Supabase wrapper) generate the correct query form automatically. This only matters when you write raw SQL queries against Supabase encrypted columns. If you are using the Drizzle adapter or Supabase wrapper, no action is needed. + + +The same principle applies to `eql_v2.bloom_filter` and `eql_v2.ste_vec` indexes: the extraction function must appear in both the index definition and the query predicate. + +--- + +## Complete example + +```sql filename="migrations/add_encrypted_indexes.sql" +-- Equality index (Supabase-compatible form) +CREATE INDEX users_email_eq_idx ON users USING hash (eql_v2.hmac_256(email)); + +-- Match index +CREATE INDEX users_name_match_idx ON users USING gin (eql_v2.bloom_filter(name)); + +-- JSONB index +CREATE INDEX documents_metadata_ste_idx ON documents USING gin (eql_v2.ste_vec(metadata)); + +-- Range index (self-hosted only — requires operator family) +CREATE INDEX users_age_range_idx ON users USING btree (age); +``` + +--- + +## Related + +- [Searchable encryption queries](/stack/cipherstash/encryption/queries): Query patterns for each index type +- [Searchable encryption overview](/stack/cipherstash/encryption/searchable-encryption): How searchable indexes work +- [Supabase integration](/stack/cipherstash/supabase): Supabase-specific setup and limitations +- [EQL guide](/stack/reference/eql-guide): Full reference for EQL types and functions diff --git a/content/stack/cipherstash/encryption/meta.json b/content/stack/cipherstash/encryption/meta.json index 2563481..3b3c1d7 100644 --- a/content/stack/cipherstash/encryption/meta.json +++ b/content/stack/cipherstash/encryption/meta.json @@ -10,7 +10,12 @@ "schema", "encrypt-decrypt", 
"searchable-encryption", + "queries", + "indexes", "identity", + "---Operations---", + "models", + "bulk-operations", "---Integrations---", "storing-data", "drizzle", diff --git a/content/stack/cipherstash/encryption/models.mdx b/content/stack/cipherstash/encryption/models.mdx new file mode 100644 index 0000000..a3f59b3 --- /dev/null +++ b/content/stack/cipherstash/encryption/models.mdx @@ -0,0 +1,270 @@ +--- +title: Model operations +description: Encrypt and decrypt entire records with schema-driven field selection using encryptModel, decryptModel, bulkEncryptModels, and bulkDecryptModels +--- + +# Model operations + +Model methods encrypt or decrypt an entire object in one call. The SDK inspects your schema to find which fields to encrypt and leaves all other fields on the object unchanged. + +This is the recommended approach when working with database records: pass the object in, get the encrypted (or decrypted) version back, and write it to the database. + +For full method signatures, see the [`EncryptionClient` API reference](/stack/reference/stack/latest/packages/stack/src/encryption/classes/EncryptionClient). + +## How schema-driven selection works + +When you call `encryptModel(record, schema)`, the SDK compares the object's keys against the columns declared in your `encryptedTable` schema. Fields that match a schema column are encrypted. Everything else passes through as-is. 
+ +Given this schema: + +```typescript filename="schema.ts" +import { encryptedTable, encryptedColumn } from "@cipherstash/stack/schema" + +const users = encryptedTable("users", { + email: encryptedColumn("email").equality().freeTextSearch(), + ssn: encryptedColumn("ssn").equality(), +}) +``` + +And this record: + +```typescript +const user = { + id: "user_123", // not in schema + email: "alice@example.com", // in schema + ssn: "123-45-6789", // in schema + createdAt: new Date(), // not in schema + role: "admin", // not in schema +} +``` + +The field selection looks like this: + +| Field | In schema | After `encryptModel` | +|---|---|---| +| `id` | No | `string` (unchanged) | +| `email` | Yes | `Encrypted` | +| `ssn` | Yes | `Encrypted` | +| `createdAt` | No | `Date` (unchanged) | +| `role` | No | `string` (unchanged) | + +## `encryptModel` + +Encrypts one object. Returns a `Result` wrapping the encrypted object. + +```typescript filename="encrypt-model.ts" +import { Encryption } from "@cipherstash/stack" +import { encryptedTable, encryptedColumn } from "@cipherstash/stack/schema" + +const users = encryptedTable("users", { + email: encryptedColumn("email").equality().freeTextSearch(), + ssn: encryptedColumn("ssn").equality(), +}) + +const client = await Encryption({ schemas: [users] }) + +const user = { + id: "user_123", + email: "alice@example.com", + ssn: "123-45-6789", + createdAt: new Date(), +} + +const result = await client.encryptModel(user, users) + +if (result.failure) { + throw new Error(`Encryption failed: ${result.failure.message}`) +} + +const encryptedUser = result.data +// encryptedUser.email → Encrypted +// encryptedUser.ssn → Encrypted +// encryptedUser.id → "user_123" (unchanged) +``` + +### Schema-aware types + +TypeScript infers the return type from the schema. Let the compiler do the work: do not pass an explicit type parameter unless you need backward compatibility. 
+
+```typescript filename="encrypt-model-types.ts"
+// Let TypeScript infer — the return type reflects exactly which fields are encrypted
+const result = await client.encryptModel(user, users)
+// result.data.email is typed as Encrypted
+// result.data.id is typed as string
+
+// Explicit type parameter — return type degrades to User
+const result = await client.encryptModel<User>(user, users)
+
+// Explicit model and schema types — fully schema-aware
+const result = await client.encryptModel<User, typeof users>(user, users)
+```
+
+## `decryptModel`
+
+Decrypts one encrypted object. The SDK detects which fields are encrypted payloads and decrypts them. Non-encrypted fields pass through.
+
+```typescript filename="decrypt-model.ts"
+const decResult = await client.decryptModel(encryptedUser)
+
+if (decResult.failure) {
+  throw new Error(`Decryption failed: ${decResult.failure.message}`)
+}
+
+const decryptedUser = decResult.data
+// decryptedUser.email → "alice@example.com"
+// decryptedUser.ssn → "123-45-6789"
+// decryptedUser.id → "user_123"
+```
+
+`decryptModel` does not require a schema argument. It detects encrypted fields by inspecting the payload structure.
+
+## `bulkEncryptModels`
+
+Encrypts an array of objects in a single ZeroKMS round-trip. All records share one network call, while each field in each record still gets its own unique key.
+
+```typescript filename="bulk-encrypt-models.ts"
+const records = [
+  { id: "1", email: "alice@example.com", ssn: "111-22-3333", role: "admin" },
+  { id: "2", email: "bob@example.com", ssn: "444-55-6666", role: "user" },
+  { id: "3", email: "cara@example.com", ssn: "777-88-9999", role: "user" },
+]
+
+const result = await client.bulkEncryptModels(records, users)
+
+if (result.failure) {
+  throw new Error(`Bulk encryption failed: ${result.failure.message}`)
+}
+
+// result.data is an array of encrypted records, same order as input
+const encryptedRecords = result.data
+```
+
+### Writing encrypted records to PostgreSQL
+
+```typescript filename="bulk-insert.ts"
+import { Pool } from "pg"
+
+const pool = new Pool({ connectionString: process.env.DATABASE_URL })
+
+async function createUsers(users: { email: string; ssn: string; role: string }[]) {
+  const result = await client.bulkEncryptModels(users, usersSchema)
+
+  if (result.failure) {
+    throw new Error(result.failure.message)
+  }
+
+  await pool.query(
+    `INSERT INTO users (email, ssn, role)
+     SELECT * FROM UNNEST($1::jsonb[], $2::jsonb[], $3::text[])`,
+    [
+      result.data.map((r) => r.email),
+      result.data.map((r) => r.ssn),
+      result.data.map((r) => r.role),
+    ],
+  )
+}
+```
+
+## `bulkDecryptModels`
+
+Decrypts an array of encrypted records in a single ZeroKMS call. Returns `Decrypted[]` — an array of plain objects with all encrypted fields resolved back to their original types.
+ +```typescript filename="bulk-decrypt-models.ts" +const decResult = await client.bulkDecryptModels(encryptedRecords) + +if (decResult.failure) { + throw new Error(`Bulk decryption failed: ${decResult.failure.message}`) +} + +for (const user of decResult.data) { + console.log(user.email, user.ssn) +} +``` + +### Decrypting database query results + +Fetch rows from PostgreSQL and pass the array directly to `bulkDecryptModels`: + +```typescript filename="fetch-decrypt.ts" +const { rows } = await pool.query("SELECT * FROM users LIMIT 100") + +const decResult = await client.bulkDecryptModels(rows) + +if (decResult.failure) { + throw new Error(decResult.failure.message) +} + +const users = decResult.data +``` + +## Failure handling + +All model methods return a `Result` object. The top-level `failure` fires when the entire operation fails (network error, auth failure, invalid credentials). There is no per-item failure for model operations: if any record fails, the whole call fails. + +```typescript filename="error-handling.ts" +const result = await client.bulkEncryptModels(records, users) + +if (result.failure) { + console.error(result.failure.type) // e.g. "EncryptionError" + console.error(result.failure.message) // human-readable description +} +``` + +See [Error handling](/stack/reference/error-handling) for the full set of error types. + +## Identity-aware model operations + +Chain `.withLockContext()` to bind encryption to a user's JWT: + +```typescript filename="model-with-identity.ts" +import { LockContext } from "@cipherstash/stack/identity" + +const lc = new LockContext() +const lockContext = (await lc.identify(userJwt)).data! 
+ +// Single record +const encrypted = await client + .encryptModel(user, users) + .withLockContext(lockContext) + +// Bulk records — one ZeroKMS call, all locked to the same identity +const bulkEncrypted = await client + .bulkEncryptModels(records, users) + .withLockContext(lockContext) + +const bulkDecrypted = await client + .bulkDecryptModels(encryptedRecords) + .withLockContext(lockContext) +``` + +See [Identity-aware encryption](/stack/cipherstash/encryption/identity) for the full flow. + +## When to use model methods vs raw bulk + +| Scenario | Recommended method | +|---|---| +| Inserting new records from an API payload | `encryptModel` / `bulkEncryptModels` | +| Reading records from a database and decrypting for display | `decryptModel` / `bulkDecryptModels` | +| Encrypting one specific field across many records (migration, import) | `bulkEncrypt` | +| Encrypting a single value for a query term | `encrypt` | + +Use model methods when you have whole records to round-trip. Use raw bulk methods when you are targeting a single field across many records. + +See [Bulk operations](/stack/cipherstash/encryption/bulk-operations) for `bulkEncrypt` and `bulkDecrypt`. 
+
+## ORM integrations
+
+The Drizzle, Supabase, and DynamoDB adapters wrap model methods behind their own APIs:
+
+- [Drizzle ORM](/stack/cipherstash/encryption/drizzle): `encryptModel` and `bulkEncryptModels` used behind `db.insert()`
+- [Supabase](/stack/cipherstash/supabase): `encryptedSupabase` handles model encryption transparently
+- [DynamoDB](/stack/cipherstash/encryption/dynamodb): `encryptedDynamoDB` wraps `PutItem` and `GetItem`
+
+## Next steps
+
+- [Bulk operations](/stack/cipherstash/encryption/bulk-operations): raw-value bulk encrypt and decrypt
+- [Schema definition](/stack/cipherstash/encryption/schema): declare which fields to encrypt
+- [Storing encrypted data](/stack/cipherstash/encryption/storing-data): raw SQL insert and retrieve patterns
+- [Identity-aware encryption](/stack/cipherstash/encryption/identity): scope encryption to a user's JWT
diff --git a/content/stack/cipherstash/encryption/queries.mdx b/content/stack/cipherstash/encryption/queries.mdx
new file mode 100644
index 0000000..9e4d31c
--- /dev/null
+++ b/content/stack/cipherstash/encryption/queries.mdx
@@ -0,0 +1,271 @@
+---
+title: Searchable encryption queries
+description: Equality, match, and range query patterns for encrypted PostgreSQL columns, with SDK predicates and raw SQL forms.
+---
+
+# Searchable encryption queries
+
+This page covers the three query families available for encrypted columns: equality, match (free-text), and range/order. Each section shows the SDK predicate, the raw SQL form, the underlying EQL index, and links to the corresponding index setup.
+
+For index creation (the `CREATE INDEX` statements your database needs), see [Setting up indexes](/stack/cipherstash/encryption/indexes).
+
+For a conceptual overview of how searchable encryption works, see [Searchable encryption](/stack/cipherstash/encryption/searchable-encryption).
+
+## Equality
+
+Exact match on an encrypted column. Uses the `unique` (HMAC-SHA256) index.
+ +**Schema:** + +```typescript filename="src/schema.ts" +import { encryptedTable, encryptedColumn } from "@cipherstash/stack/schema" + +const users = encryptedTable("users", { + email: encryptedColumn("email").equality(), +}) +``` + +**SDK (single value):** + +```typescript filename="src/queries.ts" +const term = await client.encryptQuery("alice@example.com", { + column: users.email, + table: users, + queryType: "equality", +}) + +const result = await pgClient.query( + "SELECT * FROM users WHERE email = $1", + [term.data], +) +``` + +**SDK (IN list):** + +```typescript filename="src/queries.ts" +const terms = await client.encryptQuery([ + { value: "alice@example.com", column: users.email, table: users, queryType: "equality" as const }, + { value: "bob@example.com", column: users.email, table: users, queryType: "equality" as const }, +]) + +// Use each term.data as a separate parameter, or build an ANY($1) query. +``` + +**Drizzle:** + +```typescript filename="src/queries.ts" +const results = await db + .select() + .from(usersTable) + .where(await encryptionOps.eq(usersTable.email, "alice@example.com")) +``` + +**Supabase wrapper:** + +```typescript filename="src/queries.ts" +const { data } = await eSupabase + .from("users", users) + .select("id, email") + .eq("email", "alice@example.com") +``` + +**Raw SQL (self-hosted with EQL operator classes):** + +```sql +SELECT * FROM users WHERE email = $1::eql_v2_encrypted; +``` + +**Raw SQL (Supabase / functional index form):** + +```sql +SELECT * FROM users WHERE eql_v2.hmac_256(email) = eql_v2.hmac_256($1::eql_v2_encrypted); +``` + + + On Supabase, bare `WHERE email = $1` does not use the functional index. Wrap both sides with `eql_v2.hmac_256()` to engage the hash index. The SDK wrappers (Drizzle, Supabase wrapper) handle this automatically. See [Index setup: Supabase callout](/stack/cipherstash/encryption/indexes#supabase-query-forms). 
+ + +**Underlying index:** [Equality index setup](/stack/cipherstash/encryption/indexes#equality) + +--- + +## Match (free-text) + +Substring and full-text search on an encrypted column. Uses the `match` (Bloom filter) index. Corresponds to `LIKE` / `ILIKE` semantics. + +**Schema:** + +```typescript filename="src/schema.ts" +const users = encryptedTable("users", { + name: encryptedColumn("name").freeTextSearch(), +}) +``` + +**SDK:** + +```typescript filename="src/queries.ts" +const term = await client.encryptQuery("alice", { + column: users.name, + table: users, + queryType: "freeTextSearch", +}) + +const result = await pgClient.query( + "SELECT * FROM users WHERE name LIKE $1", + [term.data], +) +``` + +**Drizzle:** + +```typescript filename="src/queries.ts" +const results = await db + .select() + .from(usersTable) + .where(await encryptionOps.ilike(usersTable.name, "%alice%")) +``` + +**Supabase wrapper:** + +```typescript filename="src/queries.ts" +const { data } = await eSupabase + .from("users", users) + .select("id, name") + .ilike("name", "%alice%") +``` + +**Raw SQL:** + +```sql +SELECT * FROM users WHERE name LIKE $1; +``` + +The Bloom filter index uses a GIN index on the extracted filter term. See [Match index setup](/stack/cipherstash/encryption/indexes#match). + +**Underlying index:** [Match index setup](/stack/cipherstash/encryption/indexes#match) + +--- + +## Range and ordering + +Comparison (`>`, `>=`, `<`, `<=`, `BETWEEN`) and `ORDER BY` on an encrypted column. Uses the `ore` (Order Revealing Encryption) index. 
+ +**Schema:** + +```typescript filename="src/schema.ts" +const users = encryptedTable("users", { + age: encryptedColumn("age").dataType("number").orderAndRange(), +}) +``` + +**SDK (range filter):** + +```typescript filename="src/queries.ts" +const term = await client.encryptQuery(21, { + column: users.age, + table: users, + queryType: "orderAndRange", +}) + +const result = await pgClient.query( + "SELECT * FROM users WHERE age > $1", + [term.data], +) +``` + +**SDK, ORDER BY (self-hosted only):** + +```typescript filename="src/queries.ts" +// Self-hosted PostgreSQL with EQL operator families installed: +const result = await pgClient.query( + "SELECT * FROM users ORDER BY age ASC", +) + +// Without operator family support (Supabase, or --exclude-operator-family): +const result = await pgClient.query( + "SELECT * FROM users ORDER BY eql_v2.ore_block_u64_8_256(age) ASC", +) +``` + +**Drizzle:** + +```typescript filename="src/queries.ts" +// Range +const results = await db + .select() + .from(usersTable) + .where(await encryptionOps.gte(usersTable.age, 18)) + +// Sort (requires operator family support; not available on Supabase) +const results = await db + .select() + .from(usersTable) + .orderBy(encryptionOps.asc(usersTable.age)) +``` + +**Supabase wrapper:** + +```typescript filename="src/queries.ts" +// Range filter works +const { data } = await eSupabase + .from("users", users) + .select("id, age") + .gte("age", 18) + +// ORDER BY on encrypted columns is not supported on Supabase. +// Sort application-side after decrypting. +``` + + + `ORDER BY` on encrypted columns requires EQL operator families, which need superuser access to install. Supabase does not grant superuser. Range _filters_ (`>`, `>=`, `<`, `<=`) work on both self-hosted and Supabase. Sorting on encrypted columns is not currently supported on Supabase. Sort application-side after decrypting results. 
Operator family support for Supabase is being developed in collaboration with the Supabase and CipherStash teams. + + +**Underlying index:** [Range index setup](/stack/cipherstash/encryption/indexes#range-and-order) + +--- + +## JSONB queries + +Query encrypted JSON columns using path existence or containment. Uses the `ste_vec` index. + +**Schema:** + +```typescript filename="src/schema.ts" +const documents = encryptedTable("documents", { + metadata: encryptedColumn("metadata").searchableJson(), +}) +``` + +**SDK (path existence):** + +```typescript filename="src/queries.ts" +const term = await client.encryptQuery("$.user.role", { + column: documents.metadata, + table: documents, +}) + +const result = await pgClient.query( + "SELECT * FROM documents WHERE cs_ste_vec_v2(metadata) @> $1", + [term.data], +) +``` + +**SDK (containment):** + +```typescript filename="src/queries.ts" +const term = await client.encryptQuery({ role: "admin" }, { + column: documents.metadata, + table: documents, +}) +``` + +**Drizzle:** + +```typescript filename="src/queries.ts" +const results = await db + .select() + .from(documentsTable) + .where(await encryptionOps.jsonbPathExists(documentsTable.metadata, "$.user.role")) +``` + +**Underlying index:** [JSONB index setup](/stack/cipherstash/encryption/indexes#jsonb) diff --git a/content/stack/cipherstash/encryption/searchable-encryption.mdx b/content/stack/cipherstash/encryption/searchable-encryption.mdx index b4eb1b3..415a773 100644 --- a/content/stack/cipherstash/encryption/searchable-encryption.mdx +++ b/content/stack/cipherstash/encryption/searchable-encryption.mdx @@ -32,6 +32,11 @@ The result: your data is encrypted at rest, in transit, and during query evaluat npx stash db install ``` 2. Define your encryption schema with the appropriate search indexes +3. Create PostgreSQL indexes on your encrypted columns. 
See [Setting up indexes](/stack/cipherstash/encryption/indexes) for the correct `CREATE INDEX` syntax for your deployment (self-hosted vs Supabase). + + + Index creation syntax differs between self-hosted PostgreSQL and Supabase. On Supabase, queries against encrypted columns require a specific function-wrapped form to engage functional indexes. See [Setting up indexes](/stack/cipherstash/encryption/indexes) for the full guide. + ## What is EQL? diff --git a/content/stack/cipherstash/kms/index.mdx b/content/stack/cipherstash/kms/index.mdx index 7823463..336d5e9 100644 --- a/content/stack/cipherstash/kms/index.mdx +++ b/content/stack/cipherstash/kms/index.mdx @@ -1,9 +1,9 @@ --- -title: /KEY-MANAGEMENT +title: ZeroKMS description: 100x faster key management. Unique key per value, derived on demand, never stored. Backed by AWS KMS. --- -# /KEY-MANAGEMENT +# ZeroKMS ZeroKMS is the key management layer that powers [Encryption](/stack/cipherstash/encryption) and Secrets (coming soon). Every encrypted value gets its own unique key. Keys derived on demand, never stored. Identity and policy baked into every key. diff --git a/content/stack/cipherstash/meta.json b/content/stack/cipherstash/meta.json index 544a4bb..5a65ffd 100644 --- a/content/stack/cipherstash/meta.json +++ b/content/stack/cipherstash/meta.json @@ -5,6 +5,8 @@ "root": true, "pages": [ "index", + "---Postgres---", + "postgres", "supabase", "---Encryption---", "encryption", diff --git a/content/stack/cipherstash/postgres.mdx b/content/stack/cipherstash/postgres.mdx new file mode 100644 index 0000000..7c97736 --- /dev/null +++ b/content/stack/cipherstash/postgres.mdx @@ -0,0 +1,88 @@ +--- +title: PostgreSQL +description: CipherStash integration options for PostgreSQL databases. Choose the right path based on your ORM, deployment, and how much application control you need. +--- + +# PostgreSQL + +CipherStash provides field-level encryption for PostgreSQL databases. You choose the integration depth. 
All paths use the same key management ([ZeroKMS](/stack/cipherstash/kms)), the same searchable encryption primitives ([EQL](/stack/reference/eql-guide)), and produce the same encrypted storage format. + +## Choose your integration + +| If you want... | Use | +|---|---| +| Zero application code changes, transparent encryption at the wire | [Proxy](/stack/cipherstash/proxy) | +| Application-level control, ORM-agnostic | [Encryption SDK](/stack/cipherstash/encryption) | +| First-class Drizzle ORM integration | [Drizzle adapter](/stack/cipherstash/encryption/drizzle) | +| First-class Supabase JS SDK integration | [Supabase wrapper](/stack/cipherstash/supabase) | + +### Proxy + +[CipherStash Proxy](/stack/cipherstash/proxy) sits between your application and PostgreSQL. Your application connects to the proxy with a standard PostgreSQL connection string. The proxy encrypts and decrypts fields transparently using a policy file. No application code changes are required. + +Best for: existing applications you cannot modify, or teams who want encryption with zero SDK dependencies. + +### Encryption SDK + +The [Encryption SDK](/stack/cipherstash/encryption) is a Node.js library that encrypts values before they reach the database and decrypts them on the way back. You call `encrypt` and `decrypt` explicitly in your data layer. The SDK is ORM-agnostic and works with any PostgreSQL client. + +Best for: new applications or teams who want explicit control over which fields are encrypted and when. + +### Drizzle adapter + +The [Drizzle adapter](/stack/cipherstash/encryption/drizzle) wraps the Encryption SDK with Drizzle-native types and operators. Define encrypted columns with `encryptedType` in your Drizzle schema. Query them with `encryptionOps.eq`, `encryptionOps.ilike`, and other typed operators that mirror the standard Drizzle API. + +Best for: teams already using Drizzle ORM who want type-safe encrypted queries without writing raw SQL. 
+ +### Supabase wrapper + +The [Supabase wrapper](/stack/cipherstash/supabase) wraps the Supabase JS client with transparent encryption on mutations and decryption on selects. Queries read identically to standard Supabase queries. It uses the Encryption SDK internally. + +Best for: teams using the Supabase JS client who want minimal query-layer changes. + +## What is EQL? + +EQL (Encrypt Query Language) is the PostgreSQL extension that makes encrypted queries possible. It provides the `eql_v2_encrypted` column type and the functions that index and compare encrypted values without decrypting them. Every integration path above relies on EQL. + +See [Encrypt Query Language](/stack/reference/eql-guide) for the full reference. + +## Index setup + +Encrypted columns require indexes for fast queries. Index syntax differs between self-hosted PostgreSQL (full EQL with operator classes) and managed databases like Supabase (no superuser, no operator families). See [Setting up indexes](/stack/cipherstash/encryption/indexes) for the complete setup guide, including the right index form for each deployment. + +## How these compose + +Proxy and the Encryption SDK are not mutually exclusive. A single PostgreSQL database can serve both: + +- One application uses the Proxy (legacy service with no code changes) +- Another application uses the Encryption SDK directly (new service with full control) + +Both write to the same `eql_v2_encrypted` columns and use the same keysets. Data encrypted by one path is readable by the other. + +The Drizzle and Supabase adapters sit on top of the Encryption SDK. They are not separate encryption implementations. Swapping between the raw SDK and an adapter does not change how data is stored or which keys are used. 
+ +``` +Your application + │ + ├── Drizzle adapter ──┐ + ├── Supabase wrapper ─┤ + └── Raw SDK ──────────┤ + ▼ + Encryption SDK + │ + ▼ + ZeroKMS + │ + ▼ + PostgreSQL + (eql_v2_encrypted) +``` + +The Proxy is a separate path that does not use the SDK, but it writes the same encrypted format and reads from the same keysets. + +## Next steps + +- [Quickstart](/stack/quickstart): Encrypt your first fields in 15 minutes +- [Setting up indexes](/stack/cipherstash/encryption/indexes): Create PostgreSQL indexes for encrypted queries +- [Searchable encryption queries](/stack/cipherstash/encryption/queries): Equality, match, and range query patterns +- [Planning guide](/stack/reference/planning-guide): Architecture decisions and integration path selection diff --git a/content/stack/cipherstash/proxy/message-flow.mdx b/content/stack/cipherstash/proxy/message-flow.mdx new file mode 100644 index 0000000..a7a6f56 --- /dev/null +++ b/content/stack/cipherstash/proxy/message-flow.mdx @@ -0,0 +1,75 @@ +--- +title: Message flow +description: How CipherStash Proxy handles PostgreSQL Parse and Bind messages to transparently encrypt and decrypt query parameters. +--- + +This page explains the internal message handling flow for advanced users debugging unmappable statements or unexpected proxy behaviour. CipherStash Proxy sits between your application and PostgreSQL and intercepts the PostgreSQL extended query protocol. It encrypts parameters before they reach the database and decrypts values before they reach your application. + +## Extended query protocol overview + +The PostgreSQL extended query protocol uses a sequence of messages to execute parameterised queries. The two most relevant messages for encryption are: + +- **Parse** — the client sends a SQL statement with parameter placeholders (`$1`, `$2`, ...). Proxy inspects the statement and maps column references to their encryption config. +- **Bind** — the client sends parameter values to bind to a prepared statement. 
If Proxy recognised the statement during Parse, it encrypts the parameters here before forwarding them. + +## Parse flow + +When Proxy receives a Parse message, it determines whether the SQL statement references encrypted columns. + +![Parse message flow diagram](/images/proxy/parse.svg) + +1. Proxy checks whether the statement is encryptable (i.e., it references at least one column with an active encryption config). +2. If it is encryptable, Proxy maps the column references to their encryption configuration. +3. If the statement includes literal parameter values, Proxy rewrites them as encrypted values immediately. +4. Proxy adds the statement and its column config to the connection context for use during Bind. +5. Proxy forwards the (possibly rewritten) Parse message to PostgreSQL. + +If the statement is not encryptable (no encrypted columns referenced), Proxy forwards it unchanged. + +## Bind flow + +When Proxy receives a Bind message, it looks up the corresponding statement in the connection context. + +![Bind message flow diagram](/images/proxy/bind.svg) + +1. Proxy checks whether the statement that this Bind message references is in the context (i.e., was processed during Parse). +2. If it is, Proxy encrypts each parameter value according to the column config mapped during Parse. +3. Proxy rewrites the parameter values in the Bind message with the encrypted payloads. +4. Proxy creates a portal for the bound statement and adds it to the context. +5. Proxy forwards the rewritten Bind message to PostgreSQL. + +If the statement is not in context, Proxy creates a portal without encryption and forwards it unchanged. + +## Pipelining + +PostgreSQL supports pipelining: the client can send multiple messages without waiting for responses. Proxy must track Describe and Execute messages to correlate server responses with the right statements or portals, since responses arrive in order but may interleave. 
+ +``` + Sequential Pipelined +| Client | Server | | Client | Server | +|----------------|-----------------| |----------------|-----------------| +| send query 1 | | | send query 1 | | +| | process query 1 | | send query 2 | process query 1 | +| receive rows 1 | | | send query 3 | process query 2 | +| send query 2 | | | receive rows 1 | process query 3 | +| | process query 2 | | receive rows 2 | | +| receive rows 2 | | | receive rows 3 | | +``` + +The PostgreSQL server always processes queries in sequential order, even when pipelined. Proxy preserves this ordering when encrypting parameters and decrypting results. + +## Unmappable statements + +Some statements cannot be mapped to an encryption config. This happens when: + +- The statement uses a function or expression that obscures the column reference (e.g., `CAST(email AS text)`) +- The column is referenced through a view, subquery, or CTE that Proxy cannot resolve +- The statement uses a SQL feature Proxy does not yet parse (e.g., certain `COPY` forms) + +When a statement is unmappable, Proxy forwards it to PostgreSQL unmodified. No encryption or decryption occurs. The `cipherstash_proxy_statements_unmappable_total` Prometheus metric tracks how often this happens. Enable `CS_LOG__MAPPER_LEVEL=debug` to see which statements are unmappable and why. 
+ +## Related reference + +- [Proxy configuration reference](/stack/reference/proxy-reference) +- [Proxy errors](/stack/reference/proxy-errors) +- [Prometheus metrics](/stack/reference/proxy-reference#prometheus-metrics) diff --git a/content/stack/cipherstash/proxy/meta.json b/content/stack/cipherstash/proxy/meta.json index b486ce7..6093591 100644 --- a/content/stack/cipherstash/proxy/meta.json +++ b/content/stack/cipherstash/proxy/meta.json @@ -12,6 +12,7 @@ "searchable-json", "encrypt-tool", "audit", + "message-flow", "troubleshooting" ] } diff --git a/content/stack/cipherstash/supabase.mdx b/content/stack/cipherstash/supabase.mdx index c4939e4..cf68050 100644 --- a/content/stack/cipherstash/supabase.mdx +++ b/content/stack/cipherstash/supabase.mdx @@ -207,6 +207,10 @@ Full reference: [Drizzle ORM integration](/stack/cipherstash/encryption/drizzle) 4. **Identity-bound keys.** Tie encryption to a user's identity. Only that user can decrypt their data. 5. **Zero-knowledge.** CipherStash never sees your plaintext data. Keys are derived on your machine and never stored. + + Encrypted columns on Supabase require functional indexes and a specific query form to avoid silent sequential scans. See [Setting up indexes](/stack/cipherstash/encryption/indexes) for the correct `CREATE INDEX` statements and query patterns. + + ## Going to production Local development uses device-based authentication. Production uses environment variables. See [Going to production](/stack/deploy/going-to-production). diff --git a/content/stack/reference/discovery-session.mdx b/content/stack/reference/discovery-session.mdx new file mode 100644 index 0000000..dc10835 --- /dev/null +++ b/content/stack/reference/discovery-session.mdx @@ -0,0 +1,63 @@ +--- +title: Discovery session +description: What to prepare before your first conversation with the CipherStash team, and what to expect during the call. 
+--- + +# Discovery session + +A discovery session is a structured 60-minute conversation between your engineering or security team and CipherStash. The goal is to map your data security requirements to the right integration path and identify anything that needs attention before you start building. + +This page differs from the [planning guide](/stack/reference/planning-guide). The planning guide is self-serve technical reading you do before or after a session. This page is preparation for the conversation itself. + +## Who should attend + +Bring the people who can answer questions about your data architecture and compliance requirements. Typically: + +- An engineer who owns the data layer or ORM setup +- A security, compliance, or privacy lead (if separate from engineering) + +You do not need to have any CipherStash code written yet. + +## What to prepare + +Work through the following before the session. You do not need written answers. Thinking through these areas in advance makes the conversation more productive. + +### Current data security posture + +- Which sensitive fields does your application store (PII, payment data, health records)? +- Are those fields encrypted today? If so, at what layer (disk, TLS, application)? +- Do you have column-level or field-level encryption anywhere? + +### Regulated data inventory + +- Which regulations apply to your data (GDPR, HIPAA, PCI-DSS, SOC 2, BDSG)? +- Which specific fields are in scope for each regulation? +- Do you have data residency requirements (EU-only, US-only)? + +### Target outcomes + +- What is the threat model you are trying to address (breach, insider access, accidental exposure)? +- Do you need searchable encrypted fields, or encrypt-only? +- Do you need per-user encryption (identity-aware, lock contexts)? +- What does success look like at 30 days, 90 days? + +### Architecture constraints + +- Which database are you using (PostgreSQL self-hosted, Supabase, RDS, DynamoDB)? 
+- Which ORM or query layer sits above it (Drizzle, Prisma, raw SQL, Supabase JS SDK)? +- Do you use a connection proxy or PgBouncer? +- What is your deployment environment (Vercel, AWS Lambda, containers, bare metal)? +- Do you have restrictions on native Node.js modules or binary dependencies? + +## What to expect during the session + +1. **Context gathering (15 min).** The CipherStash team walks through the areas above with you. No slides, no sales deck. +2. **Integration path recommendation (20 min).** Based on your database and ORM, the team recommends one of: Proxy (zero code changes), Encryption SDK (application-layer control), Drizzle adapter, or Supabase wrapper. See [the PostgreSQL options overview](/stack/cipherstash/postgres) for a preview of this decision. +3. **Key questions and blockers (15 min).** Open discussion about anything that could block adoption: compliance requirements, deployment constraints, managed database limitations. +4. **Next steps (10 min).** Concrete actions for both sides, with timelines. + +You will leave with a clear recommended path, answers to your blockers, and a point of contact for technical questions during your trial. + +## Book a session + +[Contact the CipherStash team](https://cipherstash.com/contact) to schedule a discovery session. diff --git a/content/stack/reference/drizzle.mdx b/content/stack/reference/drizzle.mdx new file mode 100644 index 0000000..c7d67bd --- /dev/null +++ b/content/stack/reference/drizzle.mdx @@ -0,0 +1,140 @@ +--- +title: Drizzle adapter reference +description: Encrypted query operators, schema extraction, EQL migration generation, and API surface for @cipherstash/stack/drizzle. +--- + +`@cipherstash/stack/drizzle` integrates CipherStash field-level encryption with Drizzle ORM. It provides a custom column type for encrypted fields and drop-in query operators that encrypt search values before they reach PostgreSQL. This page covers the operators, batching patterns, and migration generation. 
The step-by-step integration guide is at [Drizzle integration guide](/stack/cipherstash/encryption/drizzle). Full type signatures live in the [auto-generated API reference](/stack/reference/stack/latest/packages/stack/src/drizzle). + +## Public entry points + +| Export | Purpose | +|---|---| +| [`encryptedType`](/stack/reference/stack/latest/packages/stack/src/drizzle/functions/encryptedType) | Custom Drizzle column type for an encrypted field. Accepts a `dataType` and index config. | +| [`extractEncryptionSchema`](/stack/reference/stack/latest/packages/stack/src/drizzle/functions/extractEncryptionSchema) | Converts a Drizzle `pgTable` definition into a CipherStash `EncryptedTable` schema for the SDK. | +| [`createEncryptionOperators`](/stack/reference/stack/latest/packages/stack/src/drizzle/functions/createEncryptionOperators) | Returns an object with all Drizzle query operators wrapped for encrypted columns. | +| [`EncryptedColumnConfig`](/stack/reference/stack/latest/packages/stack/src/drizzle/type-aliases/EncryptedColumnConfig) | Type alias for the column configuration object (`dataType`, `equality`, `freeTextSearch`, `orderAndRange`, `searchableJson`). | +| [`EncryptionConfigError`](/stack/reference/stack/latest/packages/stack/src/drizzle/classes/EncryptionConfigError) | Thrown when a column lacks the index required by an operator. | +| [`EncryptionOperatorError`](/stack/reference/stack/latest/packages/stack/src/drizzle/classes/EncryptionOperatorError) | Thrown for operator-level failures (invalid arguments, unsupported operations). | + +## Encrypted query operators + +`createEncryptionOperators` returns a set of operators that mirror the standard Drizzle operator names. Each operator encrypts the search value before constructing the SQL fragment. + +**Key pattern:** Most operators are async. `await` the operator call in the `.where()` clause, or pass un-awaited operators to `encryptionOps.and()` / `encryptionOps.or()` for batching. 
+ +| Operator | EQL function / mechanism | Required column index | Notes | +|---|---|---|---| +| `eq(col, value)` | PostgreSQL `=` on `eql_v2_encrypted` | `equality: true` | Also accepts `orderAndRange: true` | +| `ne(col, value)` | PostgreSQL `!=` on `eql_v2_encrypted` | `equality: true` | Also accepts `orderAndRange: true` | +| `like(col, pattern)` | Bloom filter via `eql_v2_encrypted` | `freeTextSearch: true` | Case sensitivity depends on token filter config | +| `ilike(col, pattern)` | Bloom filter via `eql_v2_encrypted` | `freeTextSearch: true` | Case sensitivity depends on token filter config | +| `notIlike(col, pattern)` | Bloom filter via `eql_v2_encrypted` | `freeTextSearch: true` | | +| `gt(col, value)` | `eql_v2.gt()` ORE function | `orderAndRange: true` | | +| `gte(col, value)` | `eql_v2.gte()` ORE function | `orderAndRange: true` | | +| `lt(col, value)` | `eql_v2.lt()` ORE function | `orderAndRange: true` | | +| `lte(col, value)` | `eql_v2.lte()` ORE function | `orderAndRange: true` | | +| `between(col, min, max)` | `eql_v2.gte()` + `eql_v2.lte()` | `orderAndRange: true` | Inclusive | +| `notBetween(col, min, max)` | ORE negation | `orderAndRange: true` | | +| `inArray(col, values)` | Multiple equality encryptions | `equality: true` | | +| `notInArray(col, values)` | Multiple equality encryptions | `equality: true` | | +| `asc(col)` | ORE sort expression | `orderAndRange: true` | Sync, no `await` needed | +| `desc(col)` | ORE sort expression | `orderAndRange: true` | Sync, no `await` needed | +| `jsonbPathExists(col, path)` | `eql_v2.jsonb_path_exists()` | `searchableJson: true` | Returns boolean for use in `WHERE` | +| `jsonbPathQueryFirst(col, path)` | `eql_v2.jsonb_path_query_first()` | `searchableJson: true` | Returns encrypted value for use in `SELECT` | +| `jsonbGet(col, path)` | `->` operator on `eql_v2_encrypted` | `searchableJson: true` | Returns encrypted value for use in `SELECT` | + +Non-encrypted columns fall back to the standard Drizzle 
operator automatically. + + + Sorting encrypted columns with `asc()` or `desc()` requires operator family support in the database. On managed databases (Supabase, RDS) or when EQL is installed with `--exclude-operator-family`, sort application-side after decrypting instead. + + +```typescript filename="queries.ts" +import { drizzle } from "drizzle-orm/postgres-js" +import { Encryption } from "@cipherstash/stack" +import { extractEncryptionSchema, createEncryptionOperators } from "@cipherstash/stack/drizzle" +import { usersTable } from "./schema" +import postgres from "postgres" + +const usersSchema = extractEncryptionSchema(usersTable) +const client = await Encryption({ schemas: [usersSchema] }) +const ops = createEncryptionOperators(client) +const db = drizzle({ client: postgres(process.env.DATABASE_URL!) }) + +// Equality lookup +const exact = await db + .select() + .from(usersTable) + .where(await ops.eq(usersTable.email, "alice@example.com")) + +// Range query on an encrypted number column +const adults = await db + .select() + .from(usersTable) + .where(await ops.gte(usersTable.age, 18)) +``` + +## Batching conditions with `and` and `or` + +Passing multiple operators to `encryptionOps.and()` or `encryptionOps.or()` batches all encryption into a single ZeroKMS call. This is more efficient than `await`ing each operator separately. + +Pass each operator without `await` as an argument to `and()` or `or()`, then `await` the outer call. 
+ +```typescript filename="batched-query.ts" +// All three encryptions happen in one ZeroKMS call +const results = await db + .select() + .from(usersTable) + .where( + await ops.and( + ops.gte(usersTable.age, 18), + ops.lte(usersTable.age, 65), + ops.ilike(usersTable.email, "%@example.com"), + ), + ) +``` + +Both `and()` and `or()` filter out `undefined` conditions, which makes conditional query building safe: + +```typescript filename="conditional-query.ts" +const results = await db + .select() + .from(usersTable) + .where( + await ops.and( + searchEmail ? ops.ilike(usersTable.email, `%${searchEmail}%`) : undefined, + ops.gte(usersTable.age, minAge), + ), + ) +``` + +## EQL migration generation + +`extractEncryptionSchema` produces a CipherStash schema object from your Drizzle table. The `@cipherstash/cli` uses this schema to generate the EQL database migration that installs the required PostgreSQL indexes. + +Run the migration generator after defining your table: + +```bash +npx @cipherstash/cli db install +``` + +The CLI reads your Drizzle config and calls `extractEncryptionSchema` internally to determine which columns need EQL indexes. It then produces a timestamped SQL migration file in your Drizzle migrations directory. + +See the [CipherStash CLI reference](/stack/cipherstash/cli) for all `db install` options. 
+ +## Cross-links + +- Integration guide: [Drizzle integration guide](/stack/cipherstash/encryption/drizzle) +- Index types: [Encrypted indexes](/stack/cipherstash/encryption/indexes) +- Query patterns: [Encrypted queries](/stack/cipherstash/encryption/queries) +- PostgreSQL setup: [Postgres setup](/stack/cipherstash/postgres) + +## Full API surface + +Everything else is in the auto-generated TypeDoc reference: + +- [Drizzle module](/stack/reference/stack/latest/packages/stack/src/drizzle) — all exports +- [`encryptedType`](/stack/reference/stack/latest/packages/stack/src/drizzle/functions/encryptedType) — column builder +- [`extractEncryptionSchema`](/stack/reference/stack/latest/packages/stack/src/drizzle/functions/extractEncryptionSchema) — schema conversion +- [`createEncryptionOperators`](/stack/reference/stack/latest/packages/stack/src/drizzle/functions/createEncryptionOperators) — operator factory +- [`EncryptedColumnConfig`](/stack/reference/stack/latest/packages/stack/src/drizzle/type-aliases/EncryptedColumnConfig) — column config type diff --git a/content/stack/reference/encryption-sdk.mdx b/content/stack/reference/encryption-sdk.mdx new file mode 100644 index 0000000..1abfb63 --- /dev/null +++ b/content/stack/reference/encryption-sdk.mdx @@ -0,0 +1,116 @@ +--- +title: Encryption SDK reference +description: Public entry points, supported data types, and configuration highlights for @cipherstash/stack field-level encryption. +--- + +`@cipherstash/stack` is CipherStash's field-level encryption SDK for TypeScript. It encrypts individual column values client-side using per-value keys derived from ZeroKMS (backed by AWS KMS), before data leaves the application. This page summarises the public surface, data type rules, and configuration options. Full type signatures live in the [auto-generated API reference](/stack/reference/stack/latest/packages/stack/src/encryption). 
+ +## Public entry points + +| Export | Import path | Purpose | +|---|---|---| +| `Encryption(config)` | `@cipherstash/stack` | Factory function. Returns a `Promise`. [Reference](/stack/reference/stack/latest/packages/stack/src/encryption/functions/Encryption) | +| `EncryptionClient` | `@cipherstash/stack/encryption` | Class with all encrypt/decrypt methods. Obtain via `Encryption()`, not `new`. [Reference](/stack/reference/stack/latest/packages/stack/src/encryption/classes/EncryptionClient) | +| `encryptedTable` / `encryptedColumn` / `encryptedField` | `@cipherstash/stack/schema` | Schema builders. Define which tables and columns to encrypt, and which search indexes to create. [Reference](/stack/reference/stack/latest/packages/stack/src/schema) | +| `LockContext` | `@cipherstash/stack/identity` | Identity-aware encryption. Ties an encrypted value to a specific JWT identity. [Reference](/stack/reference/stack/latest/packages/stack/src/identity) | +| `Secrets` | `@cipherstash/stack/secrets` | End-to-end encrypted secret storage. Separate from field-level encryption. [Reference](/stack/reference/stack/latest/packages/stack/src/types-public) | +| Error types | `@cipherstash/stack/errors` | `StackError`, `EncryptionErrorTypes`, `getErrorMessage`. [Reference](/stack/reference/stack/latest/packages/stack/src/types-public) | + +## Adapter packages + +| Adapter | Import path | Guide | +|---|---|---| +| Drizzle ORM | `@cipherstash/stack/drizzle` | [Drizzle guide](/stack/cipherstash/encryption/drizzle) | +| DynamoDB | `@cipherstash/stack/dynamodb` | [DynamoDB guide](/stack/cipherstash/encryption/dynamodb) | +| Supabase | `@cipherstash/stack/supabase` | [Supabase guide](/stack/cipherstash/encryption/supabase) | + +## Supported data types + +Each encrypted column has a declared `dataType`. This tells the SDK how to serialise the value before encryption and how to deserialise it after decryption. + +| Data type | `dataType()` value | Cast required? 
| +|---|---|---| +| String | `"string"` (default) | No | +| Text (alias) | `"text"` | No | +| Number | `"number"` | Yes | +| Bigint | `"bigint"` | Yes | +| Boolean | `"boolean"` | Yes | +| Date | `"date"` | Yes | +| JSON | `"json"` | Yes | + +**Why casting is required for non-string types.** The schema describes the shape of a column but not the runtime value. When you encrypt a `number`, the SDK serialises it as a number so that ORE (Order-Revealing Encryption) indexes preserve ordering. Without an explicit `dataType`, the SDK defaults to string serialisation. Mixing declared types and actual values produces decryption errors, so always set `dataType` when the column holds a non-string value. + +```typescript filename="schema.ts" +import { encryptedTable, encryptedColumn } from "@cipherstash/stack/schema" + +const users = encryptedTable("users", { + email: encryptedColumn("email").equality().freeTextSearch(), + age: encryptedColumn("age").dataType("number").orderAndRange(), + metadata: encryptedColumn("metadata").dataType("json").searchableJson(), +}) +``` + +## Special-value handling + +Some numeric inputs are invalid for encryption. The following table covers the behaviours documented and validated by the SDK. + +| Input | Result | +|---|---| +| `NaN` | Error (rejected before encryption) | +| `Infinity` / `-Infinity` | Error (rejected before encryption) | + + + Passing `NaN` or `Infinity` to an encrypted numeric column throws at the operation level, not at the type level. TypeScript will not catch these at compile time. Validate your inputs before calling `encrypt` or `encryptModel`. + + +## `EncryptionClientConfig` highlights + +`Encryption()` accepts an `EncryptionClientConfig` object. The `config` field is a `ClientConfig`. All credentials fall back to environment variables when omitted. + +| Option | Env variable | Description | +|---|---|---| +| `workspaceCrn` | `CS_WORKSPACE_CRN` | Workspace Cloud Resource Name. 
Format: `crn:<region>.aws:<workspace-id>` | +| `accessKey` | `CS_CLIENT_ACCESS_KEY` | API access key for authenticating with CipherStash | +| `clientId` | `CS_CLIENT_ID` | Client identifier generated during workspace onboarding | +| `clientKey` | `CS_CLIENT_KEY` | Client key material used for ZeroKMS encryption operations | +| `keyset` | (none) | Multi-tenant isolation. Specify `{ name: "tenant-a" }` or `{ id: "<keyset-id>" }` | + +See the full type at [EncryptionClientConfig](/stack/reference/stack/latest/packages/stack/src/types-public/type-aliases/EncryptionClientConfig) and [ClientConfig](/stack/reference/stack/latest/packages/stack/src/types-public/type-aliases/ClientConfig). + +```typescript filename="init.ts" +import { Encryption } from "@cipherstash/stack" +import { users } from "./schema" + +// Reads CS_* env vars automatically when config is omitted +const client = await Encryption({ schemas: [users] }) + +// Or pass credentials explicitly +const client = await Encryption({ + schemas: [users], + config: { + workspaceCrn: "crn:ap-southeast-2.aws:your-workspace-id", + clientId: "your-client-id", + clientKey: "your-client-key", + accessKey: "your-access-key", + }, +}) +``` + +## Logging + +Set `STASH_STACK_LOG` to control log verbosity. The SDK never logs plaintext data. 
+ +| Value | Output | +|---|---| +| `error` (default) | Errors only | +| `info` | Info and errors | +| `debug` | Debug, info, and errors | + +## Full API surface + +Everything else is in the auto-generated TypeDoc reference: + +- [Encryption module](/stack/reference/stack/latest/packages/stack/src/encryption) — `Encryption()`, `EncryptionClient` class, all methods +- [Schema module](/stack/reference/stack/latest/packages/stack/src/schema) — `encryptedTable`, `encryptedColumn`, `encryptedField`, type inference helpers +- [Identity module](/stack/reference/stack/latest/packages/stack/src/identity) — `LockContext` +- [Types](/stack/reference/stack/latest/packages/stack/src/types-public) — `EncryptionClientConfig`, `ClientConfig`, `EncryptOptions`, `BulkEncryptPayload`, and more diff --git a/content/stack/reference/meta.json b/content/stack/reference/meta.json index dedf113..00accdb 100644 --- a/content/stack/reference/meta.json +++ b/content/stack/reference/meta.json @@ -8,12 +8,15 @@ "---Architecture---", "what-is-cipherstash", "security-architecture", + "discovery-session", "planning-guide", "compliance", "---Use Cases---", "use-cases", "---API & SDK---", "eql-guide", + "encryption-sdk", + "drizzle", "error-handling", "migration", "proxy-reference", diff --git a/content/stack/reference/proxy-reference.mdx b/content/stack/reference/proxy-reference.mdx index ecdfe40..bc8cc6d 100644 --- a/content/stack/reference/proxy-reference.mdx +++ b/content/stack/reference/proxy-reference.mdx @@ -138,7 +138,7 @@ port = "9930" ## Development settings -Default settings are tuned for production. When running Proxy locally, override these for a better development experience. +Default settings are tuned for production. When running Proxy locally, override these for a better development experience. None of these settings are appropriate for production. 
Enable colored, human-readable logs: @@ -154,6 +154,29 @@ CS_DATABASE__CONFIG_RELOAD_INTERVAL="10" CS_DATABASE__SCHEMA_RELOAD_INTERVAL="10" ``` +### Slow query logging + +Slow query logging helps you identify statements that take longer than expected. Enable it with: + +```bash +CS_LOG__SLOW_STATEMENTS="true" +``` + +By default, any statement taking longer than 2000 ms is flagged. Tune the thresholds to match your latency expectations: + +```bash +CS_LOG__SLOW_STATEMENT_MIN_DURATION_MS="500" # statement total time +CS_LOG__SLOW_DB_RESPONSE_MIN_DURATION_MS="200" # database round-trip only +``` + +When a slow statement is detected, Proxy emits a structured log line at the `SLOW_STATEMENTS` target. With `format = "pretty"`, it looks like: + +``` +WARN slow_statement duration_ms=620 query="SELECT * FROM users WHERE ..." +``` + +Use these log lines to identify queries that need indexes or to spot unexpectedly slow EQL operations. To isolate slow-statement output in production, set `CS_LOG__SLOW_STATEMENTS_LEVEL=warn` while keeping the global level at `error`. + ## Docker-specific options These environment variables are only available in the Docker container image and are not present in the binary. diff --git a/public/images/proxy/bind.svg b/public/images/proxy/bind.svg new file mode 100644 index 0000000..3fa7007 --- /dev/null +++ b/public/images/proxy/bind.svg @@ -0,0 +1 @@ +

Yes

No

Bind

Statement in Context

Encrypt

Rewrite params

Create Portal

Add to Context

Write

\ No newline at end of file diff --git a/public/images/proxy/parse.svg b/public/images/proxy/parse.svg new file mode 100644 index 0000000..eafdbf9 --- /dev/null +++ b/public/images/proxy/parse.svg @@ -0,0 +1 @@ +

Yes

No

Yes

No

Parse

Encryptable

Map column config

Has params

Write

Rewrite params

Add to Context

\ No newline at end of file