= ({
}
return (
- <Virtuoso
- rangeChanged={(newRange) => {
- rangeRef.current = newRange
- }}
- data={flattenedItems}
- itemContent={(index) => renderRow(index)}
- style={{ height: "100%" }}
- />
-
+ <>
+ <Virtuoso
+ rangeChanged={(newRange) => {
+ rangeRef.current = newRange
+ }}
+ data={flattenedItems}
+ itemContent={(index) => renderRow(index)}
+ style={{ height: "100%" }}
+ />
+
+ </>
)
}
diff --git a/src/services/search.ts b/src/services/search.ts
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/store/Query/actions.ts b/src/store/Query/actions.ts
index 158b7d13b..dcb6ce64a 100644
--- a/src/store/Query/actions.ts
+++ b/src/store/Query/actions.ts
@@ -126,6 +126,13 @@ const setQueriesToRun = (payload: QueriesToRun): QueryAction => ({
payload,
})
+const setAISuggestionRequest = (
+ payload: { query: string; startOffset: number } | null,
+): QueryAction => ({
+ type: QueryAT.SET_AI_SUGGESTION_REQUEST,
+ payload,
+})
+
export default {
addNotification,
cleanupNotifications,
@@ -139,4 +146,5 @@ export default {
setColumns,
setActiveNotification,
setQueriesToRun,
+ setAISuggestionRequest,
}
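
For context, this is how a caller would drive the new action. A minimal sketch, assuming a standard Redux store instance (the dispatch wiring itself is not part of this patch):

```ts
import type { Store } from "redux"
import actions from "src/store/Query/actions"

declare const store: Store // the console's configured Redux store (assumed)

// Ask the assistant for a suggestion on the query starting at offset 120
store.dispatch(
  actions.setAISuggestionRequest({
    query: "SELECT * FROM trades LIMIT 10;",
    startOffset: 120,
  }),
)

// Passing null clears the pending request once it has been handled
store.dispatch(actions.setAISuggestionRequest(null))
```
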
diff --git a/src/store/Query/reducers.ts b/src/store/Query/reducers.ts
index 4019cfea7..e59f368f1 100644
--- a/src/store/Query/reducers.ts
+++ b/src/store/Query/reducers.ts
@@ -33,6 +33,7 @@ export const initialState: QueryStateShape = {
queryNotifications: {},
activeNotification: null,
queriesToRun: [],
+ aiSuggestionRequest: null,
}
const query = (state = initialState, action: QueryAction): QueryStateShape => {
@@ -265,6 +266,14 @@ const query = (state = initialState, action: QueryAction): QueryStateShape => {
queriesToRun: action.payload,
}
}
+
+ case QueryAT.SET_AI_SUGGESTION_REQUEST: {
+ return {
+ ...state,
+ aiSuggestionRequest: action.payload,
+ }
+ }
+
default:
return state
}
diff --git a/src/store/Query/selectors.ts b/src/store/Query/selectors.ts
index 5152616c8..2acfb55df 100644
--- a/src/store/Query/selectors.ts
+++ b/src/store/Query/selectors.ts
@@ -28,6 +28,7 @@ import {
StoreShape,
QueryNotifications,
QueriesToRun,
+ AISuggestionRequest,
} from "types"
import type {
QueryRawResult,
@@ -68,6 +69,10 @@ const getColumns: (
store: StoreShape,
) => Record = (store) => store.query.columns
+const getAISuggestionRequest: (
+ store: StoreShape,
+) => AISuggestionRequest | null = (store) => store.query.aiSuggestionRequest
+
export default {
getNotifications,
getQueryNotifications,
@@ -78,4 +83,5 @@ export default {
getRunning,
getTables,
getColumns,
+ getAISuggestionRequest,
}
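
Consumers read the request back through the matching selector. A sketch, inside a component, assuming the react-redux hooks already used by the console:

```ts
import { useSelector } from "react-redux"
import selectors from "src/store/Query/selectors"

const aiSuggestionRequest = useSelector(selectors.getAISuggestionRequest)
if (aiSuggestionRequest) {
  // The request carries { query, startOffset } for the assistant to act on
  console.log(aiSuggestionRequest.query, aiSuggestionRequest.startOffset)
}
```
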
diff --git a/src/store/Query/types.ts b/src/store/Query/types.ts
index b12c82098..718c7135d 100644
--- a/src/store/Query/types.ts
+++ b/src/store/Query/types.ts
@@ -1,27 +1,3 @@
-/*******************************************************************************
- * ___ _ ____ ____
- * / _ \ _ _ ___ ___| |_| _ \| __ )
- * | | | | | | |/ _ \/ __| __| | | | _ \
- * | |_| | |_| | __/\__ \ |_| |_| | |_) |
- * \__\_\\__,_|\___||___/\__|____/|____/
- *
- * Copyright (c) 2014-2019 Appsicle
- * Copyright (c) 2019-2022 QuestDB
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- ******************************************************************************/
-
import type { ReactNode } from "react"
import type {
@@ -46,6 +22,7 @@ export enum RunningType {
EXPLAIN = "explain",
REFRESH = "refresh",
QUERY = "query",
+ AI_SUGGESTION = "ai_suggestion",
NONE = "none",
}
@@ -70,6 +47,11 @@ export type QueryNotifications = Readonly<{
explain?: NotificationShape
}>
+export type AISuggestionRequest = Readonly<{
+ query: string
+ startOffset: number
+}>
+
export type QueryStateShape = Readonly<{
notifications: NotificationShape[]
tables: Table[]
@@ -79,6 +61,7 @@ export type QueryStateShape = Readonly<{
queryNotifications: Record>
activeNotification: NotificationShape | null
queriesToRun: QueriesToRun
+ aiSuggestionRequest: AISuggestionRequest | null
}>
export enum QueryAT {
@@ -94,6 +77,7 @@ export enum QueryAT {
SET_COLUMNS = "QUERY/SET_COLUMNS",
SET_ACTIVE_NOTIFICATION = "QUERY/SET_ACTIVE_NOTIFICATION",
SET_QUERIES_TO_RUN = "QUERY/SET_QUERIES_TO_RUN",
+ SET_AI_SUGGESTION_REQUEST = "QUERY/SET_AI_SUGGESTION_REQUEST",
}
type AddNotificationAction = Readonly<{
@@ -165,6 +149,11 @@ type SetQueriesToRunAction = Readonly<{
payload: QueriesToRun
}>
+type SetAISuggestionRequestAction = Readonly<{
+ type: QueryAT.SET_AI_SUGGESTION_REQUEST
+ payload: AISuggestionRequest | null
+}>
+
export type QueryAction =
| AddNotificationAction
| CleanupNotificationsAction
@@ -178,3 +167,4 @@ export type QueryAction =
| SetColumnsActions
| SetActiveNotificationAction
| SetQueriesToRunAction
+ | SetAISuggestionRequestAction
diff --git a/src/store/aiConversations.ts b/src/store/aiConversations.ts
new file mode 100644
index 000000000..4469fe9be
--- /dev/null
+++ b/src/store/aiConversations.ts
@@ -0,0 +1,48 @@
+import { db, ConversationMeta } from "./db"
+import { compressMessages, decompressMessages } from "./compression"
+import type {
+ AIConversation,
+ ConversationMessage,
+ ConversationId,
+} from "../providers/AIConversationProvider/types"
+
+export const aiConversationStore = {
+ getAllMetas: () => db.ai_conversations.toArray(),
+
+ async getMessages(
+ conversationId: ConversationId,
+ ): Promise<ConversationMessage[]> {
+ const record = await db.ai_conversation_messages.get(conversationId)
+ return record ? decompressMessages(record.data) : []
+ },
+
+ saveMeta: (meta: ConversationMeta) => db.ai_conversations.put(meta),
+
+ saveMessages: (
+ conversationId: ConversationId,
+ messages: ConversationMessage[],
+ ) =>
+ db.ai_conversation_messages.put({
+ conversationId,
+ data: compressMessages(messages),
+ }),
+
+ async saveConversation(conversation: AIConversation) {
+ const { messages, ...meta } = conversation
+ await Promise.all([
+ this.saveMeta(meta),
+ this.saveMessages(conversation.id, messages),
+ ])
+ },
+
+ deleteConversation: (conversationId: ConversationId) =>
+ Promise.all([
+ db.ai_conversations.delete(conversationId),
+ db.ai_conversation_messages.delete(conversationId),
+ ]),
+
+ updateMeta: (
+ conversationId: ConversationId,
+ updates: Partial<ConversationMeta>,
+ ) => db.ai_conversations.update(conversationId, updates),
+}
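
Each conversation is split into a small metadata row and a compressed message blob, written and read independently. A usage sketch, given an `AIConversation` value from the provider (the `title` field on `ConversationMeta` is an assumption; only `id` and `messages` are visible in this patch):

```ts
import { aiConversationStore } from "src/store/aiConversations"
import type { AIConversation } from "src/providers/AIConversationProvider/types"

declare const conversation: AIConversation // assembled by the conversation provider (assumed)

// Persist: metadata and compressed messages are written in parallel
await aiConversationStore.saveConversation(conversation)

// List metas cheaply; decompress messages only for the conversation in view
const metas = await aiConversationStore.getAllMetas()
const messages = await aiConversationStore.getMessages(metas[0].id)

// Update metadata without touching the message blob ("title" is assumed)
await aiConversationStore.updateMeta(metas[0].id, { title: "Trades by hour" })
```
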
diff --git a/src/store/buffers.ts b/src/store/buffers.ts
index 518844663..a1657ab81 100644
--- a/src/store/buffers.ts
+++ b/src/store/buffers.ts
@@ -64,6 +64,13 @@ export type Buffer = {
editorViewState?: editor.ICodeEditorViewState
metricsViewState?: MetricsViewState
isTemporary?: boolean
+ isDiffBuffer?: boolean
+ diffContent?: {
+ original: string
+ modified: string
+ queryStartOffset: number
+ conversationId?: string
+ }
}
const defaultEditorViewState: editor.ICodeEditorViewState = {
@@ -109,6 +116,8 @@ export const makeBuffer = ({
archived,
archivedAt,
isTemporary,
+ isDiffBuffer,
+ diffContent,
}: {
label: string
value?: string
@@ -118,6 +127,13 @@ export const makeBuffer = ({
archived?: boolean
archivedAt?: number
isTemporary?: boolean
+ isDiffBuffer?: boolean
+ diffContent?: {
+ original: string
+ modified: string
+ queryStartOffset: number
+ conversationId?: string
+ }
+ }): Omit<Buffer, "id"> => ({
label,
value: value ?? "",
@@ -127,6 +143,8 @@ export const makeBuffer = ({
archived,
archivedAt,
isTemporary,
+ isDiffBuffer,
+ diffContent,
})
export const makeFallbackBuffer = (bufferType: BufferType): Buffer => {
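
The new fields let a buffer carry an AI-proposed rewrite next to the original query. A construction sketch (label and SQL are illustrative):

```ts
import { makeBuffer } from "src/store/buffers"

const diffBuffer = makeBuffer({
  label: "AI suggestion", // illustrative label
  isDiffBuffer: true,
  diffContent: {
    original: "SELECT * FROM trades;",
    modified: "SELECT * FROM trades WHERE ts IN today();",
    queryStartOffset: 0, // where the query starts in the source buffer
  },
})
```
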
diff --git a/src/store/compression.ts b/src/store/compression.ts
new file mode 100644
index 000000000..2a4798fc1
--- /dev/null
+++ b/src/store/compression.ts
@@ -0,0 +1,18 @@
+import { compressSync, decompressSync, strToU8, strFromU8 } from "fflate"
+import type { ConversationMessage } from "../providers/AIConversationProvider/types"
+
+export function compressMessages(messages: ConversationMessage[]): Uint8Array {
+ return compressSync(strToU8(JSON.stringify(messages)))
+}
+
+export function decompressMessages(data: Uint8Array): ConversationMessage[] {
+ try {
+ const result = JSON.parse(
+ strFromU8(decompressSync(data)),
+ ) as ConversationMessage[]
+ return result
+ } catch (error) {
+ console.error("Failed to decompress messages:", error)
+ return []
+ }
+}
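
`compressSync` and `decompressSync` from fflate are symmetric deflate helpers, so the two functions invert each other. A round-trip check (the exact `ConversationMessage` field set is assumed from its usage in `aiAssistant.ts`):

```ts
import { compressMessages, decompressMessages } from "src/store/compression"
import type { ConversationMessage } from "src/providers/AIConversationProvider/types"

const messages = [
  {
    id: crypto.randomUUID(),
    role: "assistant",
    content: "SELECT count() FROM trades;",
    timestamp: Date.now(),
  },
] as ConversationMessage[] // field set assumed

const packed = compressMessages(messages) // Uint8Array, ready for Dexie
console.assert(
  JSON.stringify(decompressMessages(packed)) === JSON.stringify(messages),
)
```
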
diff --git a/src/store/db.ts b/src/store/db.ts
index faf79aba8..b1291baac 100644
--- a/src/store/db.ts
+++ b/src/store/db.ts
@@ -28,16 +28,30 @@ import type { Buffer } from "./buffers"
import { makeBuffer, fallbackBuffer } from "./buffers"
import { StoreKey } from "../utils/localStorage/types"
import { getValue } from "../utils/localStorage"
+import type { AIConversation } from "../providers/AIConversationProvider/types"
type EditorSettings = {
key: string
value: string | number
}
+export type ConversationMeta = Omit<AIConversation, "messages">
+
+export type ConversationMetaWithStatus = ConversationMeta & {
+ hasMessages: boolean
+}
+
+export type PersistedMessages = {
+ conversationId: string
+ data: Uint8Array
+}
+
export class Storage extends Dexie {
buffers!: Table
editor_settings!: Table
read_notifications!: Table<{ newsId: string }, number>
+ ai_conversations!: Table<ConversationMeta, string>
+ ai_conversation_messages!: Table<PersistedMessages, string>
ready: boolean = false
constructor() {
@@ -63,6 +77,10 @@ export class Storage extends Dexie {
counter++
})
})
+ this.version(4).stores({
+ ai_conversations: "id, bufferId, tableId, updatedAt, queryKey",
+ ai_conversation_messages: "conversationId",
+ })
// add initial buffer on db creation
// this is only called once, when DB is not available yet
this.on("populate", async () => {
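
In the `version(4).stores(...)` declaration the first key is the primary key and the rest are secondary indexes, so `ai_conversations` can be looked up by `bufferId`, `tableId`, `updatedAt`, or `queryKey` without a full scan. A lookup sketch, assuming conversations store their editor buffer's id in `bufferId`:

```ts
import { db } from "src/store/db"

declare const bufferId: number // the active buffer's id (assumed numeric)

// Conversation metadata for one buffer, ordered by last update (ascending)
const metas = await db.ai_conversations
  .where("bufferId")
  .equals(bufferId)
  .sortBy("updatedAt")
```
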
diff --git a/src/theme/index.ts b/src/theme/index.ts
index 618212065..1cd67466c 100644
--- a/src/theme/index.ts
+++ b/src/theme/index.ts
@@ -30,18 +30,22 @@ const color: ColorShape = {
black70: "rgba(25, 26, 33, 0.7)",
black40: "rgba(25, 26, 33, 0.4)",
black20: "rgba(25, 26, 33, 0.2)",
+ overlayBackground: "rgba(44, 46, 61, 0.48)",
gray1: "#585858",
gray2: "#bbbbbb",
backgroundDarker: "#21222c",
+ chatBackground: "#1D1E25",
backgroundLighter: "#282a36",
- background: "#21222c",
+ background: "#2d303e",
foreground: "#f8f8f2",
selection: "#44475a",
selectionDarker: "#333544",
+ midnight: "#141725",
comment: "#6272a4",
red: "#ff5555",
redDark: "#5a1d1d",
loginBackground: "#1D070E",
+ orangeDark: "#ff7f2a",
orange: "#ffb86c",
yellow: "#f1fa8c",
green: "#50fa7b",
@@ -85,4 +89,9 @@ export const theme: DefaultThemeShape = {
borderRadius: "0.8rem",
}
+export const pinkLinearGradientHorizontal =
+ "linear-gradient(90deg, #D14671 0%, #892C6C 100%)"
+export const pinkLinearGradientVertical =
+ "linear-gradient(180deg, #D14671 0%, #892C6C 100%)"
+
export type ThemeShape = typeof theme
diff --git a/src/types/styled.d.ts b/src/types/styled.d.ts
index 4c3837750..81b27c841 100644
--- a/src/types/styled.d.ts
+++ b/src/types/styled.d.ts
@@ -27,20 +27,24 @@ import "styled-components"
export type ColorShape = {
black: string
black70: string
+ overlayBackground: string
black40: string
black20: string
gray1: string
gray2: string
backgroundLighter: string
+ chatBackground: string
backgroundDarker: string
background: string
foreground: string
selection: string
selectionDarker: string
comment: string
+ midnight: string
red: string
redDark: string
loginBackground: string
+ orangeDark: string
orange: string
yellow: string
green: string
diff --git a/src/utils/aiAssistant.ts b/src/utils/aiAssistant.ts
new file mode 100644
index 000000000..ddf2e1eff
--- /dev/null
+++ b/src/utils/aiAssistant.ts
@@ -0,0 +1,1764 @@
+import Anthropic from "@anthropic-ai/sdk"
+import OpenAI from "openai"
+import { Client } from "./questdb/client"
+import { Type } from "./questdb/types"
+import { getModelProps, MODEL_OPTIONS } from "./aiAssistantSettings"
+import type { ModelOption, Provider } from "./aiAssistantSettings"
+import { formatSql } from "./formatSql"
+import { AIOperationStatus, StatusArgs } from "../providers/AIStatusProvider"
+import {
+ getQuestDBTableOfContents,
+ getSpecificDocumentation,
+ parseDocItems,
+ DocCategory,
+} from "./questdbDocsRetrieval"
+import { MessageParam } from "@anthropic-ai/sdk/resources/messages"
+import type {
+ ResponseOutputItem,
+ ResponseTextConfig,
+} from "openai/resources/responses/responses"
+import type { Tool as AnthropicTool } from "@anthropic-ai/sdk/resources/messages"
+import type {
+ ConversationId,
+ ConversationMessage,
+} from "../providers/AIConversationProvider/types"
+import { compactConversationIfNeeded } from "./contextCompaction"
+import { COMPACTION_THRESHOLDS } from "./tokenCounting"
+
+export type ActiveProviderSettings = {
+ model: string
+ provider: Provider
+ apiKey: string
+}
+
+export interface AiAssistantAPIError {
+ type: "rate_limit" | "invalid_key" | "network" | "unknown" | "aborted"
+ message: string
+ details?: string
+}
+
+export interface AiAssistantExplanation {
+ explanation: string
+ tokenUsage?: TokenUsage
+}
+
+export type AiAssistantValidateQueryResult =
+ | { valid: true }
+ | { valid: false; error: string; position: number }
+
+export interface TableSchemaExplanation {
+ explanation: string
+ columns: Array<{
+ name: string
+ description: string
+ data_type: string
+ }>
+ storage_details: string[]
+ tokenUsage?: TokenUsage
+}
+
+export const schemaExplanationToMarkdown = (
+ explanation: TableSchemaExplanation,
+): string => {
+ let md = ""
+
+ md += `${explanation.explanation}\n\n`
+
+ if (explanation.columns.length > 0) {
+ md += `## Columns\n\n`
+ md += `| Column | Type | Description |\n`
+ md += `|--------|------|-------------|\n`
+ for (const col of explanation.columns) {
+ md += `| ${col.name} | \`${col.data_type}\` | ${col.description} |\n`
+ }
+ md += `\n`
+ }
+
+ if (explanation.storage_details.length > 0) {
+ md += `## Storage Details\n\n`
+ for (const detail of explanation.storage_details) {
+ md += `- ${detail}\n`
+ }
+ }
+
+ return md
+}
+
+export interface TokenUsage {
+ inputTokens: number
+ outputTokens: number
+}
+
+export interface GeneratedSQL {
+ sql: string | null
+ explanation?: string
+ tokenUsage?: TokenUsage
+}
+
+export interface ModelToolsClient {
+ validateQuery: (query: string) => Promise<AiAssistantValidateQueryResult>
+ getTables?: () => Promise<Array<{ name: string; type: "table" | "matview" }>>
+ getTableSchema?: (tableName: string) => Promise<string | null>
+}
+
+export type StatusCallback = (
+ status: AIOperationStatus | null,
+ args?: StatusArgs,
+) => void
+
+type ProviderClients =
+ | {
+ provider: "anthropic"
+ anthropic: Anthropic
+ }
+ | {
+ provider: "openai"
+ openai: OpenAI
+ }
+
+const ExplainFormat: ResponseTextConfig = {
+ format: {
+ type: "json_schema" as const,
+ name: "explain_format",
+ schema: {
+ type: "object",
+ properties: {
+ explanation: { type: "string" },
+ },
+ required: ["explanation"],
+ additionalProperties: false,
+ },
+ strict: true,
+ },
+}
+
+const FixSQLFormat: ResponseTextConfig = {
+ format: {
+ type: "json_schema" as const,
+ name: "fix_sql_format",
+ schema: {
+ type: "object",
+ properties: {
+ sql: { type: ["string", "null"] },
+ explanation: { type: "string" },
+ },
+ required: ["explanation", "sql"],
+ additionalProperties: false,
+ },
+ strict: true,
+ },
+}
+
+const ExplainTableSchemaFormat: ResponseTextConfig = {
+ format: {
+ type: "json_schema" as const,
+ name: "explain_table_schema_format",
+ schema: {
+ type: "object",
+ properties: {
+ explanation: { type: "string" },
+ columns: {
+ type: "array",
+ items: {
+ type: "object",
+ properties: {
+ name: { type: "string" },
+ description: { type: "string" },
+ data_type: { type: "string" },
+ },
+ required: ["name", "description", "data_type"],
+ additionalProperties: false,
+ },
+ },
+ storage_details: {
+ type: "array",
+ items: { type: "string" },
+ },
+ },
+ required: ["explanation", "columns", "storage_details"],
+ additionalProperties: false,
+ },
+ strict: true,
+ },
+}
+
+const ConversationResponseFormat: ResponseTextConfig = {
+ format: {
+ type: "json_schema" as const,
+ name: "conversation_response_format",
+ schema: {
+ type: "object",
+ properties: {
+ sql: { type: ["string", "null"] },
+ explanation: { type: "string" },
+ },
+ required: ["explanation", "sql"],
+ additionalProperties: false,
+ },
+ strict: true,
+ },
+}
+
+const inferProviderFromModel = (model: string): Provider => {
+ const found: ModelOption | undefined = MODEL_OPTIONS.find(
+ (m) => m.value === model,
+ )
+ if (found) return found.provider
+ return model.startsWith("claude") ? "anthropic" : "openai"
+}
+
+const createProviderClients = (
+ settings: ActiveProviderSettings,
+): ProviderClients => {
+ if (!settings.apiKey) {
+ throw new Error(`No API key found for ${settings.provider}`)
+ }
+
+ if (settings.provider === "openai") {
+ return {
+ provider: settings.provider,
+ openai: new OpenAI({
+ apiKey: settings.apiKey,
+ dangerouslyAllowBrowser: true,
+ }),
+ }
+ }
+ return {
+ provider: settings.provider,
+ anthropic: new Anthropic({
+ apiKey: settings.apiKey,
+ dangerouslyAllowBrowser: true,
+ }),
+ }
+}
+
+const SCHEMA_TOOLS: Array<AnthropicTool> = [
+ {
+ name: "get_tables",
+ description:
+ "Get a list of all tables and materialized views in the QuestDB database",
+ input_schema: {
+ type: "object" as const,
+ properties: {},
+ },
+ },
+ {
+ name: "get_table_schema",
+ description:
+ "Get the full schema definition (DDL) for a specific table or materialized view",
+ input_schema: {
+ type: "object" as const,
+ properties: {
+ table_name: {
+ type: "string" as const,
+ description:
+ "The name of the table or materialized view to get schema for",
+ },
+ },
+ required: ["table_name"],
+ },
+ },
+]
+
+const REFERENCE_TOOLS = [
+ {
+ name: "validate_query",
+ description:
+ "Validate the syntax correctness of a SQL query using QuestDB's SQL syntax validator. All generated SQL queries should be validated using this tool before responding to the user.",
+ input_schema: {
+ type: "object" as const,
+ properties: {
+ query: {
+ type: "string" as const,
+ description: "The SQL query to validate",
+ },
+ },
+ required: ["query"],
+ },
+ },
+ {
+ name: "get_questdb_toc",
+ description:
+ "Get a table of contents listing all available QuestDB functions, operators, and SQL keywords. Use this first to see what documentation is available before requesting specific items.",
+ input_schema: {
+ type: "object" as const,
+ properties: {},
+ },
+ },
+ {
+ name: "get_questdb_documentation",
+ description:
+ "Get documentation for specific QuestDB functions, operators, or SQL keywords. This is much more efficient than loading all documentation.",
+ input_schema: {
+ type: "object" as const,
+ properties: {
+ category: {
+ type: "string" as const,
+ enum: ["functions", "operators", "sql", "concepts", "schema"],
+ description: "The category of documentation to retrieve",
+ },
+ items: {
+ type: "array" as const,
+ items: {
+ type: "string" as const,
+ },
+ description:
+ "List of specific docs items in the category. IMPORTANT: Category of these items must match the category parameter. Name of these items should exactly match the entry in the table of contents you get with get_questdb_toc.",
+ },
+ },
+ required: ["category", "items"],
+ },
+ },
+]
+
+const ALL_TOOLS = [...SCHEMA_TOOLS, ...REFERENCE_TOOLS]
+
+const toOpenAIFunctions = (
+ tools: Array<{
+ name: string
+ description?: string
+ input_schema: AnthropicTool["input_schema"]
+ }>,
+) => {
+ return tools.map((t) => ({
+ type: "function" as const,
+ name: t.name,
+ description: t.description,
+ parameters: { ...t.input_schema, additionalProperties: false },
+ strict: true,
+ })) as OpenAI.Responses.Tool[]
+}
+
+export const normalizeSql = (sql: string, insertSemicolon: boolean = true) => {
+ if (!sql) return ""
+ let result = sql.trim()
+ if (result.endsWith(";")) {
+ result = result.slice(0, -1)
+ }
+ return formatSql(result) + (insertSemicolon ? ";" : "")
+}
+
+export function isAiAssistantError(
+ response:
+ | AiAssistantAPIError
+ | AiAssistantExplanation
+ | GeneratedSQL
+ | Partial,
+): response is AiAssistantAPIError {
+ if ("type" in response && "message" in response) {
+ return true
+ }
+ return false
+}
+
+export function createModelToolsClient(
+ questClient: Client,
+ tables?: Array<{ table_name: string; matView: boolean }>,
+): ModelToolsClient {
+ return {
+ async validateQuery(
+ query: string,
+ ): Promise<AiAssistantValidateQueryResult> {
+ try {
+ const response = await questClient.validateQuery(query)
+ if ("error" in response) {
+ const errorResponse = response as {
+ error: string
+ position: number
+ query: string
+ }
+ return {
+ valid: false,
+ error: String(errorResponse.error),
+ position: Number(errorResponse.position),
+ }
+ }
+ return {
+ valid: true,
+ }
+ } catch (err) {
+ const errorMessage =
+ err instanceof Error
+ ? err.message
+ : "Failed to validate query. Something went wrong with the server."
+ return {
+ valid: false,
+ error: errorMessage,
+ position: -1,
+ }
+ }
+ },
+ ...(tables
+ ? {
+ getTables(): Promise<
+ Array<{ name: string; type: "table" | "matview" }>
+ > {
+ return Promise.resolve(
+ tables.map((table) => ({
+ name: table.table_name,
+ type: table.matView ? "matview" : ("table" as const),
+ })),
+ )
+ },
+
+ async getTableSchema(tableName: string): Promise<string | null> {
+ try {
+ const table = tables.find((t) => t.table_name === tableName)
+ if (!table) {
+ return null
+ }
+
+ const ddlResponse = table.matView
+ ? await questClient.showMatViewDDL(tableName)
+ : await questClient.showTableDDL(tableName)
+
+ if (
+ ddlResponse?.type === Type.DQL &&
+ ddlResponse.data?.[0]?.ddl
+ ) {
+ return ddlResponse.data[0].ddl
+ }
+
+ return null
+ } catch (error) {
+ console.error(
+ `Failed to fetch schema for table ${tableName}:`,
+ error,
+ )
+ return null
+ }
+ },
+ }
+ : {}),
+ }
+}
+
+const DOCS_INSTRUCTION_ANTHROPIC = `
+CRITICAL: Always follow this two-phase documentation approach:
+1. Use get_questdb_toc to see available functions/keywords/operators
+2. Use get_questdb_documentation to get details for specific items you'll use`
+
+const getUnifiedPrompt = (grantSchemaAccess?: boolean) => {
+ const base = `You are a SQL expert assistant specializing in QuestDB, a high-performance time-series database. You help users with:
+- Generating QuestDB SQL queries from natural language descriptions
+- Explaining what QuestDB SQL queries do
+- Fixing errors in QuestDB SQL queries
+- Refining and modifying existing queries based on user requests
+
+## When Explaining Queries
+- Focus on the business logic and what the query achieves, not the SQL syntax itself
+- Pay special attention to QuestDB-specific features:
+ - Time-series operations (SAMPLE BY, LATEST ON, designated timestamp columns)
+ - Time-based filtering and aggregations
+ - Real-time data ingestion patterns
+ - Performance optimizations specific to time-series data
+
+## When Generating SQL
+- Always validate the query using the validate_query tool before returning the generated SQL query
+- Generate only valid QuestDB SQL syntax referring to the documentation about functions, operators, and SQL keywords
+- Use appropriate time-series functions (SAMPLE BY, LATEST ON, etc.) and common table expressions when relevant
+- Use \`IN\` with \`today()\`, \`tomorrow()\`, \`yesterday()\` interval functions when relevant
+- Follow QuestDB best practices for performance referring to the documentation
+- Use proper timestamp handling for time-series data
+- Use correct data types and functions specific to QuestDB referring to the documentation. Do not use any word that is not in the documentation.
+
+## When Fixing Queries
+- Always validate the query using the validate_query tool before returning the fixed SQL query
+- Analyze the error message carefully to understand what went wrong
+- Generate only valid QuestDB SQL syntax by always referring to the documentation about functions, operators, and SQL keywords
+- Preserve the original intent of the query while fixing the error
+- Follow QuestDB best practices and syntax rules referring to the documentation
+- Consider common issues like:
+ - Missing or incorrect column names
+ - Invalid syntax for time-series operations
+ - Data type mismatches
+ - Incorrect function usage
+
+## Response Guidelines
+- Modify a query by returning "sql" field only if the user asks you to generate, fix, or make changes to the query. If the user does not ask for fixing/changing/generating a query, return null in the "sql" field. Every time you provide a SQL query, the current SQL is updated.
+- Always provide the "explanation" field, which should be a 2-4 sentence explanation in markdown format.
+
+## Tools
+
+`
+ const schemaAccess = grantSchemaAccess
+ ? `You have access to schema tools:
+- Use the get_tables tool to retrieve all tables and materialized views in the database instance
+- Use the get_table_schema tool to get detailed schema information for a specific table or a materialized view
+`
+ : ""
+ return base + schemaAccess + DOCS_INSTRUCTION_ANTHROPIC
+}
+
+export const getExplainSchemaPrompt = (
+ tableName: string,
+ schema: string,
+ isMatView: boolean,
+) => `You are a SQL expert assistant specializing in QuestDB, a high-performance time-series database.
+Briefly explain the following ${isMatView ? "materialized view" : "table"} schema in detail. Include:
+- The purpose of the ${isMatView ? "materialized view" : "table"}
+- What each column represents and its data type
+- Any important properties like WAL enablement, partitioning strategy, designated timestamps
+- Any performance or storage considerations
+
+${isMatView ? "Materialized View" : "Table"} Name: ${tableName}
+
+Schema:
+\`\`\`sql
+${schema}
+\`\`\`
+
+Provide a short explanation that helps developers understand how to use this ${isMatView ? "materialized view" : "table"}.
+
+Return a JSON string with the following structure:
+{ "explanation": "The purpose of the table/materialized view", "columns": [ { "name": "Column Name", "description": "Column Description", "data_type": "Data Type" } ], "storage_details": ["Storage detail 1", "Storage detail 2"] }`
+
+const MAX_RETRIES = 2
+const RETRY_DELAY = 1000
+
+let lastRequestTime = 0
+const MIN_REQUEST_INTERVAL = 2000
+
+const handleRateLimit = async () => {
+ const now = Date.now()
+ const timeSinceLastRequest = now - lastRequestTime
+ if (timeSinceLastRequest < MIN_REQUEST_INTERVAL) {
+ await new Promise((resolve) =>
+ setTimeout(resolve, MIN_REQUEST_INTERVAL - timeSinceLastRequest),
+ )
+ }
+ lastRequestTime = Date.now()
+}
+
+const isNonRetryableError = (error: unknown) => {
+ return (
+ error instanceof RefusalError ||
+ error instanceof MaxTokensError ||
+ error instanceof Anthropic.AuthenticationError ||
+ (typeof OpenAI !== "undefined" &&
+ error instanceof OpenAI.AuthenticationError) ||
+ // @ts-expect-error no proper rate limit error type
+ ("status" in error && error.status === 429)
+ )
+}
+
+const executeTool = async (
+ toolName: string,
+ input: unknown,
+ modelToolsClient: ModelToolsClient,
+ setStatus: StatusCallback,
+): Promise<{ content: string; is_error?: boolean }> => {
+ try {
+ switch (toolName) {
+ case "get_tables": {
+ setStatus(AIOperationStatus.RetrievingTables)
+ if (!modelToolsClient.getTables) {
+ return {
+ content:
+ "Error: Schema access is not granted. This tool is not available.",
+ is_error: true,
+ }
+ }
+ const result = await modelToolsClient.getTables()
+ const MAX_TABLES = 1000
+ if (result.length > MAX_TABLES) {
+ const truncated = result.slice(0, MAX_TABLES)
+ return {
+ content: JSON.stringify(
+ {
+ tables: truncated,
+ total_count: result.length,
+ truncated: true,
+ message: `Showing ${MAX_TABLES} of ${result.length} tables. Use get_table_schema with a specific table name to get details if you are interested in a specific table.`,
+ },
+ null,
+ 2,
+ ),
+ }
+ }
+ return { content: JSON.stringify(result, null, 2) }
+ }
+ case "get_table_schema": {
+ const tableName = (input as { table_name: string })?.table_name
+ if (!modelToolsClient.getTableSchema) {
+ return {
+ content:
+ "Error: Schema access is not granted. This tool is not available.",
+ is_error: true,
+ }
+ }
+ if (!tableName) {
+ return {
+ content: "Error: table_name parameter is required",
+ is_error: true,
+ }
+ }
+ setStatus(AIOperationStatus.InvestigatingTableSchema, {
+ name: tableName,
+ })
+ const result = await modelToolsClient.getTableSchema(tableName)
+ return {
+ content:
+ result || `Table '${tableName}' not found or schema unavailable`,
+ }
+ }
+ case "validate_query": {
+ setStatus(AIOperationStatus.ValidatingQuery)
+ const query = (input as { query: string })?.query
+ if (!query) {
+ return {
+ content: "Error: query parameter is required",
+ is_error: true,
+ }
+ }
+ const result = await modelToolsClient.validateQuery(query)
+ const content = {
+ valid: result.valid,
+ error: result.valid ? undefined : result.error,
+ position: result.valid ? undefined : result.position,
+ }
+ return { content: JSON.stringify(content, null, 2) }
+ }
+ case "get_questdb_toc": {
+ setStatus(AIOperationStatus.RetrievingDocumentation)
+ const tocContent = await getQuestDBTableOfContents()
+ return { content: tocContent }
+ }
+ case "get_questdb_documentation": {
+ const { category, items } =
+ (input as { category: string; items: string[] }) || {}
+ if (!category || !items || !Array.isArray(items)) {
+ return {
+ content: "Error: category and items parameters are required",
+ is_error: true,
+ }
+ }
+ const parsedItems = parseDocItems(items)
+
+ if (parsedItems.length > 0) {
+ setStatus(AIOperationStatus.InvestigatingDocs, { items: parsedItems })
+ } else {
+ setStatus(AIOperationStatus.InvestigatingDocs)
+ }
+ const documentation = await getSpecificDocumentation(
+ category as DocCategory,
+ items,
+ )
+ return { content: documentation }
+ }
+ default:
+ return { content: `Unknown tool: ${toolName}`, is_error: true }
+ }
+ } catch (error) {
+ return {
+ content: `Tool execution error: ${error instanceof Error ? error.message : "Unknown error"}`,
+ is_error: true,
+ }
+ }
+}
+
+interface AnthropicToolCallResult {
+ message: Anthropic.Messages.Message
+ accumulatedTokens: TokenUsage
+}
+
+async function handleToolCalls(
+ message: Anthropic.Messages.Message,
+ anthropic: Anthropic,
+ modelToolsClient: ModelToolsClient,
+ conversationHistory: Array<MessageParam>,
+ model: string,
+ setStatus: StatusCallback,
+ responseFormat: ResponseTextConfig,
+ abortSignal?: AbortSignal,
+ accumulatedTokens: TokenUsage = { inputTokens: 0, outputTokens: 0 },
+): Promise<AnthropicToolCallResult | AiAssistantAPIError> {
+ const toolUseBlocks = message.content.filter(
+ (block) => block.type === "tool_use",
+ )
+ const toolResults = []
+
+ if (abortSignal?.aborted) {
+ return {
+ type: "aborted",
+ message: "Operation was cancelled",
+ } as AiAssistantAPIError
+ }
+
+ for (const toolUse of toolUseBlocks) {
+ if ("name" in toolUse) {
+ const exec = await executeTool(
+ toolUse.name,
+ toolUse.input,
+ modelToolsClient,
+ setStatus,
+ )
+ toolResults.push({
+ type: "tool_result" as const,
+ tool_use_id: toolUse.id,
+ content: exec.content,
+ is_error: exec.is_error,
+ })
+ }
+ }
+
+ const updatedHistory = [
+ ...conversationHistory,
+ {
+ role: "assistant" as const,
+ content: message.content,
+ },
+ {
+ role: "user" as const,
+ content: toolResults,
+ },
+ ]
+
+ const criticalTokenUsage =
+ message.usage.input_tokens >= COMPACTION_THRESHOLDS["anthropic"] &&
+ toolResults.length > 0
+ if (criticalTokenUsage) {
+ updatedHistory.push({
+ role: "user" as const,
+ content:
+ "**CRITICAL TOKEN USAGE: The conversation is getting too long to fit the context window. If you are planning to use more tools, summarize your findings to the user first, and wait for user confirmation to continue working on the task.**",
+ })
+ }
+
+ const followUpParams: Parameters<typeof createAnthropicMessage>[1] = {
+ model,
+ tools: modelToolsClient.getTables ? ALL_TOOLS : REFERENCE_TOOLS,
+ messages: updatedHistory,
+ temperature: 0.3,
+ }
+
+ const format = responseFormat.format as { type: string; schema?: object }
+ if (format.type === "json_schema" && format.schema) {
+ // @ts-expect-error - output_format is a new field not yet in the type definitions
+ followUpParams.output_format = {
+ type: "json_schema",
+ schema: format.schema,
+ }
+ }
+
+ const followUpMessage = await createAnthropicMessage(
+ anthropic,
+ followUpParams,
+ )
+
+ // Accumulate tokens from this response
+ const newAccumulatedTokens: TokenUsage = {
+ inputTokens:
+ accumulatedTokens.inputTokens +
+ (followUpMessage.usage?.input_tokens || 0),
+ outputTokens:
+ accumulatedTokens.outputTokens +
+ (followUpMessage.usage?.output_tokens || 0),
+ }
+
+ if (followUpMessage.stop_reason === "tool_use") {
+ return handleToolCalls(
+ followUpMessage,
+ anthropic,
+ modelToolsClient,
+ updatedHistory,
+ model,
+ setStatus,
+ responseFormat,
+ abortSignal,
+ newAccumulatedTokens,
+ )
+ }
+
+ return {
+ message: followUpMessage,
+ accumulatedTokens: newAccumulatedTokens,
+ }
+}
+
+const extractOpenAIToolCalls = (
+ response: OpenAI.Responses.Response,
+): { id?: string; name: string; arguments: unknown; call_id: string }[] => {
+ const calls = []
+ for (const item of response.output) {
+ if (item?.type === "function_call") {
+ const args =
+ typeof item.arguments === "string"
+ ? safeJsonParse(item.arguments)
+ : item.arguments || {}
+ calls.push({
+ id: item.id,
+ name: item.name,
+ arguments: args,
+ call_id: item.call_id,
+ })
+ }
+ }
+ return calls
+}
+
+const getOpenAIText = (
+ response: OpenAI.Responses.Response,
+): { type: "refusal" | "text"; message: string } => {
+ const out = response.output || []
+ if (
+ out.find(
+ (item: ResponseOutputItem) =>
+ item.type === "message" &&
+ item.content.some((c) => c.type === "refusal"),
+ )
+ ) {
+ return {
+ type: "refusal",
+ message: "The model refused to generate a response for this request.",
+ }
+ }
+ return { type: "text", message: response.output_text }
+}
+
+const safeJsonParse = <T>(text: string): T | object => {
+ try {
+ return JSON.parse(text) as T
+ } catch {
+ return {}
+ }
+}
+
+const tryWithRetries = async <T>(
+ fn: () => Promise<T>,
+ setStatus: StatusCallback,
+ abortSignal?: AbortSignal,
+): Promise<T | AiAssistantAPIError> => {
+ let retries = 0
+ while (retries <= MAX_RETRIES) {
+ try {
+ if (abortSignal?.aborted) {
+ return {
+ type: "aborted",
+ message: "Operation was cancelled",
+ } as AiAssistantAPIError
+ }
+
+ return await fn()
+ } catch (error) {
+ retries++
+ if (retries > MAX_RETRIES || isNonRetryableError(error)) {
+ setStatus(null)
+ return handleAiAssistantError(error)
+ }
+
+ await new Promise((resolve) => setTimeout(resolve, RETRY_DELAY * retries))
+ }
+ }
+
+ setStatus(null)
+ return {
+ type: "unknown",
+ message: `Failed to get response after ${retries} retries`,
+ }
+}
+
+interface OpenAIFlowConfig<T> {
+ systemInstructions: string
+ initialUserContent: string
+ conversationHistory?: Array<{ role: "user" | "assistant"; content: string }>
+ responseFormat: ResponseTextConfig
+ postProcess?: (formatted: T) => T
+}
+
+interface AnthropicFlowConfig<T> {
+ systemInstructions: string
+ initialUserContent: string
+ conversationHistory?: Array<{ role: "user" | "assistant"; content: string }>
+ responseFormat: ResponseTextConfig
+ postProcess?: (formatted: T) => T
+}
+
+interface ExecuteAnthropicFlowParams<T> {
+ anthropic: Anthropic
+ model: string
+ config: AnthropicFlowConfig<T>
+ modelToolsClient: ModelToolsClient
+ setStatus: StatusCallback
+ abortSignal?: AbortSignal
+}
+
+interface ExecuteOpenAIFlowParams<T> {
+ openai: OpenAI
+ model: string
+ config: OpenAIFlowConfig<T>
+ modelToolsClient: ModelToolsClient
+ setStatus: StatusCallback
+ abortSignal?: AbortSignal
+}
+
+const executeOpenAIFlow = async <T>({
+ openai,
+ model,
+ config,
+ modelToolsClient,
+ setStatus,
+ abortSignal,
+}: ExecuteOpenAIFlowParams<T>): Promise<(T & { tokenUsage: TokenUsage }) | AiAssistantAPIError> => {
+ let input: OpenAI.Responses.ResponseInput = []
+ if (config.conversationHistory && config.conversationHistory.length > 0) {
+ const validMessages = config.conversationHistory.filter(
+ (msg) => msg.content && msg.content.trim() !== "",
+ )
+ for (const msg of validMessages) {
+ input.push({
+ role: msg.role,
+ content: msg.content,
+ })
+ }
+ }
+
+ input.push({
+ role: "user",
+ content: config.initialUserContent,
+ })
+
+ const grantSchemaAccess = !!modelToolsClient.getTables
+ const openaiTools = toOpenAIFunctions(
+ grantSchemaAccess ? ALL_TOOLS : REFERENCE_TOOLS,
+ )
+
+ // Accumulate tokens across all iterations
+ let totalInputTokens = 0
+ let totalOutputTokens = 0
+
+ let lastResponse = await openai.responses.create({
+ ...getModelProps(model),
+ instructions: config.systemInstructions,
+ input,
+ tools: openaiTools,
+ text: config.responseFormat,
+ } as OpenAI.Responses.ResponseCreateParamsNonStreaming)
+ input = [...input, ...lastResponse.output]
+
+ // Add tokens from first response
+ totalInputTokens += lastResponse.usage?.input_tokens ?? 0
+ totalOutputTokens += lastResponse.usage?.output_tokens ?? 0
+
+ while (true) {
+ if (abortSignal?.aborted) {
+ return {
+ type: "aborted",
+ message: "Operation was cancelled",
+ } as AiAssistantAPIError
+ }
+
+ const toolCalls = extractOpenAIToolCalls(lastResponse)
+ if (!toolCalls.length) break
+ const tool_outputs: OpenAI.Responses.ResponseFunctionToolCallOutputItem[] =
+ []
+ for (const tc of toolCalls) {
+ const exec = await executeTool(
+ tc.name,
+ tc.arguments,
+ modelToolsClient,
+ setStatus,
+ )
+ tool_outputs.push({
+ type: "function_call_output",
+ call_id: tc.call_id,
+ output: exec.content,
+ } as OpenAI.Responses.ResponseFunctionToolCallOutputItem)
+ }
+ input = [...input, ...tool_outputs]
+
+ if (
+ (lastResponse.usage?.input_tokens ?? 0) >=
+ COMPACTION_THRESHOLDS["openai"] &&
+ tool_outputs.length > 0
+ ) {
+ input.push({
+ role: "user" as const,
+ content:
+ "**CRITICAL TOKEN USAGE: The conversation is getting too long to fit the context window. If you are planning to use more tools, summarize your findings to the user first, and wait for user confirmation to continue working on the task.**",
+ })
+ }
+ lastResponse = await openai.responses.create({
+ ...getModelProps(model),
+ instructions: config.systemInstructions,
+ input,
+ tools: openaiTools,
+ text: config.responseFormat,
+ })
+ input = [...input, ...lastResponse.output]
+
+ // Accumulate tokens from each iteration
+ totalInputTokens += lastResponse.usage?.input_tokens ?? 0
+ totalOutputTokens += lastResponse.usage?.output_tokens ?? 0
+ }
+
+ if (abortSignal?.aborted) {
+ return {
+ type: "aborted",
+ message: "Operation was cancelled",
+ } as AiAssistantAPIError
+ }
+
+ const text = getOpenAIText(lastResponse)
+ if (text.type === "refusal") {
+ return {
+ type: "unknown",
+ message: text.message,
+ } as AiAssistantAPIError
+ }
+
+ const rawOutput = text.message
+
+ try {
+ const json = JSON.parse(rawOutput) as T
+ setStatus(null)
+
+ const resultWithTokens = {
+ ...json,
+ tokenUsage: {
+ inputTokens: totalInputTokens,
+ outputTokens: totalOutputTokens,
+ },
+ } as T & { tokenUsage: TokenUsage }
+
+ if (config.postProcess) {
+ const processed = config.postProcess(json)
+ return {
+ ...processed,
+ tokenUsage: {
+ inputTokens: totalInputTokens,
+ outputTokens: totalOutputTokens,
+ },
+ } as T & { tokenUsage: TokenUsage }
+ }
+ return resultWithTokens
+ } catch (error) {
+ setStatus(null)
+ return {
+ type: "unknown",
+ message: "Failed to parse assistant response.",
+ } as AiAssistantAPIError
+ }
+}
+
+const executeAnthropicFlow = async <T>({
+ anthropic,
+ model,
+ config,
+ modelToolsClient,
+ setStatus,
+ abortSignal,
+}: ExecuteAnthropicFlowParams<T>): Promise<(T & { tokenUsage: TokenUsage }) | AiAssistantAPIError> => {
+ const initialMessages: MessageParam[] = []
+ if (config.conversationHistory && config.conversationHistory.length > 0) {
+ const validMessages = config.conversationHistory.filter(
+ (msg) => msg.content && msg.content.trim() !== "",
+ )
+ for (const msg of validMessages) {
+ initialMessages.push({
+ role: msg.role,
+ content: msg.content,
+ })
+ }
+ }
+
+ initialMessages.push({
+ role: "user" as const,
+ content: config.initialUserContent,
+ })
+
+ const grantSchemaAccess = !!modelToolsClient.getTables
+
+ const messageParams: Parameters<typeof createAnthropicMessage>[1] = {
+ model,
+ system: config.systemInstructions,
+ tools: grantSchemaAccess ? ALL_TOOLS : REFERENCE_TOOLS,
+ messages: initialMessages,
+ temperature: 0.3,
+ }
+
+ if (config.responseFormat?.format) {
+ const format = config.responseFormat.format as {
+ type: string
+ schema?: object
+ }
+ if (format.type === "json_schema" && format.schema) {
+ // @ts-expect-error - output_format is a new field not yet in the type definitions
+ messageParams.output_format = {
+ type: "json_schema",
+ schema: format.schema,
+ }
+ }
+ }
+
+ const message = await createAnthropicMessage(anthropic, messageParams)
+
+ let totalInputTokens = message.usage?.input_tokens || 0
+ let totalOutputTokens = message.usage?.output_tokens || 0
+
+ let responseMessage: Anthropic.Messages.Message
+
+ if (message.stop_reason === "tool_use") {
+ const toolCallResult = await handleToolCalls(
+ message,
+ anthropic,
+ modelToolsClient,
+ initialMessages,
+ model,
+ setStatus,
+ config.responseFormat,
+ abortSignal,
+ { inputTokens: 0, outputTokens: 0 }, // Start fresh, we already counted initial message
+ )
+
+ if ("type" in toolCallResult && "message" in toolCallResult) {
+ return toolCallResult
+ }
+
+ const result = toolCallResult
+ responseMessage = result.message
+ totalInputTokens += result.accumulatedTokens.inputTokens
+ totalOutputTokens += result.accumulatedTokens.outputTokens
+ } else {
+ responseMessage = message
+ }
+
+ if (abortSignal?.aborted) {
+ return {
+ type: "aborted",
+ message: "Operation was cancelled",
+ } as AiAssistantAPIError
+ }
+
+ const textBlock = responseMessage.content.find(
+ (block) => block.type === "text",
+ )
+ if (!textBlock || !("text" in textBlock)) {
+ setStatus(null)
+ return {
+ type: "unknown",
+ message: "No text response received from assistant.",
+ } as AiAssistantAPIError
+ }
+
+ try {
+ const json = JSON.parse(textBlock.text) as T
+ setStatus(null)
+
+ const resultWithTokens = {
+ ...json,
+ tokenUsage: {
+ inputTokens: totalInputTokens,
+ outputTokens: totalOutputTokens,
+ },
+ } as T & { tokenUsage: TokenUsage }
+
+ if (config.postProcess) {
+ const processed = config.postProcess(json)
+ return {
+ ...processed,
+ tokenUsage: {
+ inputTokens: totalInputTokens,
+ outputTokens: totalOutputTokens,
+ },
+ } as T & { tokenUsage: TokenUsage }
+ }
+ return resultWithTokens
+ } catch (error) {
+ setStatus(null)
+ return {
+ type: "unknown",
+ message: "Failed to parse assistant response.",
+ } as AiAssistantAPIError
+ }
+}
+
+export const explainTableSchema = async ({
+ tableName,
+ schema,
+ isMatView,
+ settings,
+ setStatus,
+}: {
+ tableName: string
+ schema: string
+ isMatView: boolean
+ settings: ActiveProviderSettings
+ setStatus: StatusCallback
+}): Promise<TableSchemaExplanation | AiAssistantAPIError> => {
+ if (!settings.apiKey || !settings.model) {
+ return {
+ type: "invalid_key",
+ message: "API key is missing",
+ }
+ }
+ if (!tableName || !schema) {
+ return {
+ type: "unknown",
+ message: "Cannot find schema for the table",
+ }
+ }
+
+ await handleRateLimit()
+ setStatus(AIOperationStatus.Processing)
+
+ return tryWithRetries(async () => {
+ const clients = createProviderClients(settings)
+
+ if (clients.provider === "openai") {
+ const prompt = getExplainSchemaPrompt(tableName, schema, isMatView)
+
+ const formattingOutput = await clients.openai.responses.parse({
+ ...getModelProps(settings.model),
+ instructions: getExplainSchemaPrompt(tableName, schema, isMatView),
+ input: [{ role: "user", content: prompt }],
+ text: ExplainTableSchemaFormat,
+ })
+
+ const formatted =
+ formattingOutput.output_parsed as TableSchemaExplanation | null
+ setStatus(null)
+ if (!formatted) {
+ return {
+ type: "unknown",
+ message: "Failed to parse assistant response.",
+ } as AiAssistantAPIError
+ }
+ const openAIUsage = formattingOutput.usage
+ return {
+ explanation: formatted.explanation || "",
+ columns: formatted.columns || [],
+ storage_details: formatted.storage_details || [],
+ tokenUsage: openAIUsage
+ ? {
+ inputTokens: openAIUsage.input_tokens,
+ outputTokens: openAIUsage.output_tokens,
+ }
+ : undefined,
+ }
+ }
+
+ const anthropic = clients.anthropic
+ const messageParams: Parameters<typeof createAnthropicMessage>[1] = {
+ model: getModelProps(settings.model).model,
+ messages: [
+ {
+ role: "user" as const,
+ content: getExplainSchemaPrompt(tableName, schema, isMatView),
+ },
+ ],
+ temperature: 0.3,
+ }
+ const schemaFormat = ExplainTableSchemaFormat.format as {
+ type: string
+ schema?: object
+ }
+ // @ts-expect-error - output_format is a new field not yet in the type definitions
+ messageParams.output_format = {
+ type: "json_schema",
+ schema: schemaFormat.schema,
+ }
+
+ const message = await createAnthropicMessage(anthropic, messageParams)
+
+ const textBlock = message.content.find((block) => block.type === "text")
+ if (!textBlock || !("text" in textBlock)) {
+ setStatus(null)
+ return {
+ type: "unknown",
+ message: "No text response received from assistant.",
+ } as AiAssistantAPIError
+ }
+
+ try {
+ const json = JSON.parse(textBlock.text) as TableSchemaExplanation
+ setStatus(null)
+ const anthropicUsage = message.usage
+ return {
+ explanation: json.explanation || "",
+ columns: json.columns || [],
+ storage_details: json.storage_details || [],
+ tokenUsage: anthropicUsage
+ ? {
+ inputTokens: anthropicUsage.input_tokens,
+ outputTokens: anthropicUsage.output_tokens,
+ }
+ : undefined,
+ }
+ } catch (error) {
+ setStatus(null)
+ return {
+ type: "unknown",
+ message: "Failed to parse assistant response.",
+ } as AiAssistantAPIError
+ }
+ }, setStatus)
+}
+
+class RefusalError extends Error {
+ constructor(message: string) {
+ super(message)
+ this.name = "RefusalError"
+ }
+}
+
+class MaxTokensError extends Error {
+ constructor(message: string) {
+ super(message)
+ this.name = "MaxTokensError"
+ }
+}
+
+async function createAnthropicMessage(
+ anthropic: Anthropic,
+ params: Omit<Anthropic.Messages.MessageCreateParamsNonStreaming, "max_tokens"> & {
+ max_tokens?: number
+ },
+): Promise<Anthropic.Messages.Message> {
+ const message = await anthropic.messages.create(
+ {
+ ...params,
+ stream: false,
+ max_tokens: params.max_tokens ?? 8192,
+ },
+ {
+ headers: {
+ "anthropic-beta": "structured-outputs-2025-11-13",
+ },
+ },
+ )
+
+ if (message.stop_reason === "refusal") {
+ throw new RefusalError(
+ "The model refused to generate a response for this request.",
+ )
+ }
+ if (message.stop_reason === "max_tokens") {
+ throw new MaxTokensError(
+ "The response exceeded the maximum token limit. Please try again with a different prompt or model.",
+ )
+ }
+
+ return message
+}
+
+function handleAiAssistantError(error: unknown): AiAssistantAPIError {
+ if (error instanceof RefusalError) {
+ return {
+ type: "unknown",
+ message: "The model refused to generate a response for this request.",
+ details: error.message,
+ }
+ }
+
+ if (error instanceof MaxTokensError) {
+ return {
+ type: "unknown",
+ message:
+ "The response exceeded the maximum token limit for the selected model. Please try again with a different prompt or model.",
+ details: error.message,
+ }
+ }
+
+ if (error instanceof Anthropic.AuthenticationError) {
+ return {
+ type: "invalid_key",
+ message: "Invalid API key. Please check your Anthropic API key.",
+ details: error.message,
+ }
+ }
+
+ if (error instanceof Anthropic.RateLimitError) {
+ return {
+ type: "rate_limit",
+ message: "Rate limit exceeded. Please try again later.",
+ details: error.message,
+ }
+ }
+
+ if (error instanceof Anthropic.APIConnectionError) {
+ return {
+ type: "network",
+ message: "Network error. Please check your internet connection.",
+ details: error.message,
+ }
+ }
+
+ if (error instanceof Anthropic.APIError) {
+ return {
+ type: "unknown",
+ message: `Anthropic API error: ${error.message}`,
+ }
+ }
+
+ if (error instanceof OpenAI.APIError) {
+ return {
+ type: "unknown",
+ message: `OpenAI API error: ${error.message}`,
+ }
+ }
+
+ return {
+ type: "unknown",
+ message: "An unexpected error occurred. Please try again.",
+ details: error as string,
+ }
+}
+
+export const testApiKey = async (
+ apiKey: string,
+ model: string,
+): Promise<{ valid: boolean; error?: string }> => {
+ try {
+ if (inferProviderFromModel(model) === "anthropic") {
+ const anthropic = new Anthropic({
+ apiKey,
+ dangerouslyAllowBrowser: true,
+ })
+
+ await createAnthropicMessage(anthropic, {
+ model,
+ messages: [
+ {
+ role: "user",
+ content: "ping",
+ },
+ ],
+ })
+ } else {
+ const openai = new OpenAI({ apiKey, dangerouslyAllowBrowser: true })
+ await openai.responses.create({
+ model: getModelProps(model).model,
+ input: [{ role: "user", content: "ping" }],
+ max_output_tokens: 16,
+ })
+ }
+
+ return { valid: true }
+ } catch (error: unknown) {
+ if (error instanceof Anthropic.AuthenticationError) {
+ return {
+ valid: false,
+ error: "Invalid API key",
+ }
+ }
+
+ if (error instanceof Anthropic.RateLimitError) {
+ return {
+ valid: true,
+ }
+ }
+
+ const status =
+ (error as { status?: number })?.status ||
+ (error as { error?: { status?: number } })?.error?.status
+ if (status === 401) {
+ return { valid: false, error: "Invalid API key" }
+ }
+ if (status === 429) {
+ return { valid: true }
+ }
+
+ return {
+ valid: false,
+ error:
+ error instanceof Error ? error.message : "Failed to validate API key",
+ }
+ }
+}
+
+const ChatTitleFormat: ResponseTextConfig = {
+ format: {
+ type: "json_schema" as const,
+ name: "chat_title_format",
+ schema: {
+ type: "object",
+ properties: {
+ title: { type: "string" },
+ },
+ required: ["title"],
+ additionalProperties: false,
+ },
+ strict: true,
+ },
+}
+
+export const generateChatTitle = async ({
+ firstUserMessage,
+ settings,
+}: {
+ firstUserMessage: string
+ settings: ActiveProviderSettings
+}): Promise<string | null> => {
+ if (!settings.apiKey || !settings.model) {
+ return null
+ }
+
+ try {
+ const clients = createProviderClients(settings)
+
+ const prompt = `Generate a concise chat title (max 30 characters) for this conversation. The title should capture the main topic or intent.
+
+User's message:
+${firstUserMessage}
+
+Return a JSON object with the following structure: { "title": "Your title here" }`
+
+ if (clients.provider === "openai") {
+ const response = await clients.openai.responses.create({
+ ...getModelProps(settings.model),
+ input: [{ role: "user", content: prompt }],
+ text: ChatTitleFormat,
+ max_output_tokens: 100,
+ })
+ try {
+ const parsed = JSON.parse(response.output_text) as { title: string }
+ return parsed.title || null
+ } catch {
+ return null
+ }
+ }
+
+ const messageParams: Parameters<typeof createAnthropicMessage>[1] = {
+ model: settings.model,
+ messages: [{ role: "user", content: prompt }],
+ max_tokens: 100,
+ temperature: 0.3,
+ }
+ const titleFormat = ChatTitleFormat.format as {
+ type: string
+ schema?: object
+ }
+ // @ts-expect-error - output_format is a new field not yet in the type definitions
+ messageParams.output_format = {
+ type: "json_schema",
+ schema: titleFormat.schema,
+ }
+
+ const message = await createAnthropicMessage(
+ clients.anthropic,
+ messageParams,
+ )
+
+ const textBlock = message.content.find((block) => block.type === "text")
+ if (textBlock && "text" in textBlock) {
+ try {
+ const parsed = JSON.parse(textBlock.text) as { title: string }
+ return parsed.title?.slice(0, 40) || null
+ } catch {
+ return null
+ }
+ }
+ return null
+ } catch (error) {
+ // Silently fail - title generation is not critical
+ console.warn("Failed to generate chat title:", error)
+ return null
+ }
+}
+
+export type AIOperation = "explain" | "fix" | "followup"
+
+export const continueConversation = async ({
+ userMessage,
+ conversationHistory,
+ currentSQL,
+ settings,
+ modelToolsClient,
+ setStatus,
+ abortSignal,
+ operation = "followup",
+}: {
+ userMessage: string
+ conversationHistory: Array<ConversationMessage>
+ currentSQL?: string
+ settings: ActiveProviderSettings
+ modelToolsClient: ModelToolsClient
+ setStatus: StatusCallback
+ abortSignal?: AbortSignal
+ operation?: AIOperation
+ conversationId?: ConversationId
+}): Promise<
+ (GeneratedSQL | AiAssistantExplanation | AiAssistantAPIError) & {
+ compactedConversationHistory?: Array<ConversationMessage>
+ }
+> => {
+ if (!settings.apiKey || !settings.model) {
+ return {
+ type: "invalid_key",
+ message: "API key or model is missing",
+ }
+ }
+
+ await handleRateLimit()
+ if (abortSignal?.aborted) {
+ return {
+ type: "aborted",
+ message: "Operation was cancelled",
+ }
+ }
+
+ const responseFormat = {
+ explain: ExplainFormat,
+ fix: FixSQLFormat,
+ followup: ConversationResponseFormat,
+ }[operation]
+
+ return tryWithRetries(
+ async () => {
+ const clients = createProviderClients(settings)
+ const grantSchemaAccess = !!modelToolsClient.getTables
+ const systemPrompt = getUnifiedPrompt(grantSchemaAccess)
+
+ let workingConversationHistory = conversationHistory
+ let isCompacted = false
+
+ setStatus(AIOperationStatus.Processing)
+ if (conversationHistory.length > 0) {
+ const compactionResult = await compactConversationIfNeeded(
+ conversationHistory,
+ settings.provider,
+ systemPrompt,
+ userMessage,
+ () => setStatus(AIOperationStatus.Compacting),
+ {
+ anthropicClient:
+ clients.provider === "anthropic" ? clients.anthropic : undefined,
+ openaiClient:
+ clients.provider === "openai" ? clients.openai : undefined,
+ model: settings.model,
+ },
+ )
+
+ if ("error" in compactionResult) {
+ setStatus(null)
+ return {
+ type: "unknown" as const,
+ message: compactionResult.error,
+ }
+ }
+
+ if (compactionResult.wasCompacted) {
+ workingConversationHistory = [
+ ...conversationHistory.map((m) => ({ ...m, isCompacted: true })),
+ {
+ id: crypto.randomUUID(),
+ role: "assistant" as const,
+ content: compactionResult.compactedMessage,
+ hideFromUI: true,
+ timestamp: Date.now(),
+ },
+ ]
+ isCompacted = true
+ }
+ }
+ setStatus(AIOperationStatus.Processing)
+
+ const postProcess = (formatted: {
+ sql?: string | null
+ explanation: string
+ tokenUsage?: TokenUsage
+ }): GeneratedSQL => {
+ const sql =
+ formatted?.sql === null
+ ? null
+ : formatted?.sql
+ ? normalizeSql(formatted.sql)
+ : currentSQL || ""
+ return {
+ sql,
+ explanation: formatted?.explanation || "",
+ tokenUsage: formatted.tokenUsage,
+ }
+ }
+
+ if (clients.provider === "openai") {
+ const result = await executeOpenAIFlow<{
+ sql?: string | null
+ explanation: string
+ tokenUsage?: TokenUsage
+ }>({
+ openai: clients.openai,
+ model: settings.model,
+ config: {
+ systemInstructions: getUnifiedPrompt(grantSchemaAccess),
+ initialUserContent: userMessage,
+ conversationHistory: workingConversationHistory.filter(
+ (m) => !m.isCompacted,
+ ),
+ responseFormat,
+ postProcess: (formatted) => {
+ const sql =
+ formatted?.sql === null
+ ? null
+ : formatted?.sql
+ ? normalizeSql(formatted.sql)
+ : currentSQL || ""
+ return {
+ sql,
+ explanation: formatted?.explanation || "",
+ tokenUsage: formatted.tokenUsage,
+ }
+ },
+ },
+ modelToolsClient,
+ setStatus,
+ abortSignal,
+ })
+ if (isAiAssistantError(result)) {
+ return result
+ }
+ return {
+ ...postProcess(result),
+ compactedConversationHistory: isCompacted
+ ? workingConversationHistory
+ : undefined,
+ }
+ }
+
+ const result = await executeAnthropicFlow<{
+ sql?: string | null
+ explanation: string
+ tokenUsage?: TokenUsage
+ }>({
+ anthropic: clients.anthropic,
+ model: settings.model,
+ config: {
+ systemInstructions: getUnifiedPrompt(grantSchemaAccess),
+ initialUserContent: userMessage,
+ conversationHistory: workingConversationHistory.filter(
+ (m) => !m.isCompacted,
+ ),
+ responseFormat,
+ postProcess: (formatted) => {
+ const sql =
+ formatted?.sql === null
+ ? null
+ : formatted?.sql
+ ? normalizeSql(formatted.sql)
+ : currentSQL || ""
+ return {
+ sql,
+ explanation: formatted?.explanation || "",
+ tokenUsage: formatted.tokenUsage,
+ }
+ },
+ },
+ modelToolsClient,
+ setStatus,
+ abortSignal,
+ })
+ if (isAiAssistantError(result)) {
+ return result
+ }
+ return {
+ ...postProcess(result),
+ compactedConversationHistory: isCompacted
+ ? workingConversationHistory
+ : undefined,
+ }
+ },
+ setStatus,
+ abortSignal,
+ )
+}
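
Putting the exported surface together, a follow-up turn looks roughly like this. A hedged sketch: `apiKey`, `questClient`, and `tables` come from the surrounding providers and are declared as placeholders here:

```ts
import {
  continueConversation,
  createModelToolsClient,
  isAiAssistantError,
} from "src/utils/aiAssistant"
import type { Client } from "src/utils/questdb/client"

declare const apiKey: string // user-supplied key from settings (assumed)
declare const questClient: Client // provided by the console's Quest context (assumed)
declare const tables: Array<{ table_name: string; matView: boolean }>

const result = await continueConversation({
  userMessage: "Show hourly trade counts for today",
  conversationHistory: [],
  settings: { model: "claude-sonnet-4-5", provider: "anthropic", apiKey },
  modelToolsClient: createModelToolsClient(questClient, tables),
  setStatus: (status) => console.log(status),
})

if (isAiAssistantError(result)) {
  console.error(result.type, result.message)
} else if ("sql" in result && result.sql) {
  // Validated, formatted SQL plus a short markdown explanation
  console.log(result.sql, result.explanation)
}
```
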
diff --git a/src/utils/aiAssistantSettings.ts b/src/utils/aiAssistantSettings.ts
new file mode 100644
index 000000000..7892e29b5
--- /dev/null
+++ b/src/utils/aiAssistantSettings.ts
@@ -0,0 +1,193 @@
+import { ReasoningEffort } from "openai/resources/shared"
+import type { AiAssistantSettings } from "../providers/LocalStorageProvider/types"
+
+export type Provider = "anthropic" | "openai"
+
+export type ModelOption = {
+ label: string
+ value: string
+ provider: Provider
+ isSlow?: boolean
+ isTestModel?: boolean
+ default?: boolean
+ defaultEnabled?: boolean
+}
+
+export const MODEL_OPTIONS: ModelOption[] = [
+ {
+ label: "Claude Sonnet 4.5",
+ value: "claude-sonnet-4-5",
+ provider: "anthropic",
+ default: true,
+ defaultEnabled: true,
+ },
+ {
+ label: "Claude Opus 4.5",
+ value: "claude-opus-4-5",
+ provider: "anthropic",
+ isSlow: true,
+ defaultEnabled: true,
+ },
+ {
+ label: "Claude Sonnet 4",
+ value: "claude-sonnet-4",
+ provider: "anthropic",
+ },
+ {
+ label: "Claude Haiku 4.5",
+ value: "claude-haiku-4-5",
+ provider: "anthropic",
+ isTestModel: true,
+ },
+ {
+ label: "GPT-5.1 (High Reasoning)",
+ value: "gpt-5.1@reasoning=high",
+ provider: "openai",
+ isSlow: true,
+ },
+ {
+ label: "GPT-5.1 (Medium Reasoning)",
+ value: "gpt-5.1@reasoning=medium",
+ provider: "openai",
+ isSlow: true,
+ defaultEnabled: true,
+ },
+ {
+ label: "GPT-5.1 (No Reasoning)",
+ value: "gpt-5.1",
+ provider: "openai",
+ defaultEnabled: true,
+ isTestModel: true,
+ },
+ {
+ label: "GPT-5",
+ value: "gpt-5",
+ provider: "openai",
+ defaultEnabled: true,
+ },
+ {
+ label: "GPT-5 mini",
+ value: "gpt-5-mini",
+ provider: "openai",
+ default: true,
+ defaultEnabled: true,
+ },
+]
+
+export const providerForModel = (model: ModelOption["value"]): Provider => {
+ return MODEL_OPTIONS.find((m) => m.value === model)!.provider
+}
+
+export const getModelProps = (
+ model: ModelOption["value"],
+): {
+ model: string
+ reasoning?: { effort: ReasoningEffort }
+} => {
+ const modelOption = MODEL_OPTIONS.find((m) => m.value === model)
+ if (!modelOption) {
+ return { model }
+ }
+ const parts = modelOption.value.split("@")
+ const modelName = parts[0]
+ const extraParams = parts[1]
+ if (extraParams) {
+ const params = extraParams.split("=")
+ const paramName = params[0]
+ const paramValue = params[1]
+ if (paramName === "reasoning" && paramValue) {
+ return {
+ model: modelName,
+ reasoning: { effort: paramValue as ReasoningEffort },
+ }
+ }
+ }
+ return { model: modelName }
+}
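+
+// Illustrative only (these calls are not in the patch): how "@"-encoded model
+// values are expected to parse, assuming the "name@param=value" format above:
+//   getModelProps("gpt-5.1@reasoning=high") // => { model: "gpt-5.1", reasoning: { effort: "high" } }
+//   getModelProps("gpt-5-mini") // => { model: "gpt-5-mini" }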
+
+export const getAllProviders = (): Provider[] => {
+ const providers = new Set<Provider>()
+ MODEL_OPTIONS.forEach((model) => {
+ providers.add(model.provider)
+ })
+ return Array.from(providers)
+}
+
+export const getSelectedModel = (
+ settings: AiAssistantSettings,
+): string | null => {
+ const selectedModel = settings.selectedModel
+ if (
+ selectedModel &&
+ typeof selectedModel === "string" &&
+ MODEL_OPTIONS.find((m) => m.value === selectedModel)
+ ) {
+ return selectedModel
+ }
+
+ return MODEL_OPTIONS.find((m) => m.default)?.value ?? null
+}
+
+export const getNextModel = (
+ currentModel: string | undefined,
+ enabledModels: Record<Provider, string[]>,
+): string | null => {
+ let nextModel: string | null | undefined = currentModel
+
+ const modelProvider = currentModel ? providerForModel(currentModel) : null
+ if (modelProvider && enabledModels[modelProvider].length > 0) {
+ // Current model is still enabled, so we can use it
+ if (currentModel && enabledModels[modelProvider].includes(currentModel)) {
+ return currentModel
+ }
+ // Prefer this provider's default model, otherwise its first enabled model
+ nextModel =
+ enabledModels[modelProvider].find(
+ (m) => MODEL_OPTIONS.find((mo) => mo.value === m)?.default,
+ ) ?? enabledModels[modelProvider][0]
+ } else {
+ // No enabled models left for this provider, so choose from another provider if one exists
+ const otherProviderWithEnabledModel = getAllProviders().find(
+ (p) => enabledModels[p].length > 0,
+ )
+ if (otherProviderWithEnabledModel) {
+ nextModel =
+ enabledModels[otherProviderWithEnabledModel].find(
+ (m) => MODEL_OPTIONS.find((mo) => mo.value === m)?.default,
+ ) ?? enabledModels[otherProviderWithEnabledModel][0]
+ } else {
+ nextModel = null
+ }
+ }
+ return nextModel ?? null
+}
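+
+// Sketch of the fallback behaviour with hypothetical data: if the current
+// provider has no enabled models, the other provider's default (or first
+// enabled) model is chosen.
+//   getNextModel("claude-sonnet-4", { anthropic: [], openai: ["gpt-5-mini"] })
+//   // => "gpt-5-mini"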
+
+export const isAiAssistantConfigured = (
+ settings: AiAssistantSettings,
+): boolean => {
+ return getAllProviders().some(
+ (provider) => !!settings.providers?.[provider]?.apiKey,
+ )
+}
+
+export const canUseAiAssistant = (settings: AiAssistantSettings): boolean => {
+ return isAiAssistantConfigured(settings) && !!settings.selectedModel
+}
+
+export const hasSchemaAccess = (settings: AiAssistantSettings): boolean => {
+ const selectedModel = getSelectedModel(settings)
+ if (!selectedModel) return false
+
+ const anthropicModels = settings.providers?.anthropic?.enabledModels || []
+ const openaiModels = settings.providers?.openai?.enabledModels || []
+
+ if (anthropicModels.includes(selectedModel)) {
+ return settings.providers?.anthropic?.grantSchemaAccess === true
+ }
+
+ if (openaiModels.includes(selectedModel)) {
+ return settings.providers?.openai?.grantSchemaAccess === true
+ }
+
+ return false
+}
diff --git a/src/utils/contextCompaction.ts b/src/utils/contextCompaction.ts
new file mode 100644
index 000000000..63c5dd90a
--- /dev/null
+++ b/src/utils/contextCompaction.ts
@@ -0,0 +1,238 @@
+import Anthropic from "@anthropic-ai/sdk"
+import OpenAI from "openai"
+import type { ConversationMessage } from "../providers/AIConversationProvider/types"
+import {
+ countTokens,
+ COMPACTION_THRESHOLDS,
+ type ConversationMessage as TokenConversationMessage,
+} from "./tokenCounting"
+import {
+ type Provider,
+ MODEL_OPTIONS,
+ getModelProps,
+} from "./aiAssistantSettings"
+
+type CompactionResultSuccess = {
+ compactedMessage: string
+ wasCompacted: true
+}
+
+type CompactionResultTerminationError = {
+ wasCompacted: false
+ error: string
+}
+
+type CompactionResultContinuationError = {
+ wasCompacted: false
+}
+
+export type CompactionResult =
+ | CompactionResultSuccess
+ | CompactionResultTerminationError
+ | CompactionResultContinuationError
+
+const SUMMARIZATION_PROMPT = `Summarize this SQL assistant conversation in a structured format.
+Be extremely concise - use bullet points, not paragraphs.
+
+Required sections:
+1. INITIAL REQUEST OF THE USER: What the user initially asked for (1-2 lines max)
+2. CURRENT SQL: The final SQL query/queries produced (include actual SQL code) if any
+3. KEY DECISIONS: Important choices made during the conversation (bullet points)
+4. CURRENT STATE: Where we left off - what was the last thing discussed
+
+Format your response as:
+---
+## INITIAL REQUEST OF THE USER: [brief description]
+
+
+## CURRENT SQL:
+\`\`\`sql
+[final SQL here, or "None yet" if no SQL was generated]
+\`\`\`
+
+
+## KEY DECISIONS:
+- [decision 1]
+- [decision 2]
+
+
+## CURRENT STATE:
+[what user was working on last]
+---
+
+Keep total summary under 1000 words. Focus on SQL code (if any generated) and outcomes, not process details.`
+
+export function buildContinuationPrompt(summary: string): string {
+ return `## PREVIOUS CONVERSATION SUMMARY:
+
+${summary}
+
+
+**Continue helping the user from where we left off.**`
+}
+
+function toTokenMessages(
+ messages: [...ConversationMessage[], Omit<ConversationMessage, "id">],
+): TokenConversationMessage[] {
+ return messages
+ .filter((m) => m.content && m.content.trim() !== "")
+ .map((m) => ({
+ role: m.role,
+ content: m.content,
+ }))
+}
+
+async function generateSummary(
+ middleMessages: ConversationMessage[],
+ provider: Provider,
+ anthropicClient?: Anthropic,
+ openaiClient?: OpenAI,
+): Promise<string> {
+ const testModel = MODEL_OPTIONS.find(
+ (m) => m.provider === provider && m.isTestModel,
+ )
+ if (!testModel) {
+ throw new Error("No test model found for provider")
+ }
+
+ const conversationText = middleMessages
+ .map((m) => `${m.role.toUpperCase()}: ${m.content}`)
+ .join("\n\n")
+
+ const userMessage = `Please summarize the following conversation:\n\n${conversationText}`
+
+ if (provider === "anthropic" && anthropicClient) {
+ const response = await anthropicClient.messages.create({
+ ...getModelProps(testModel.value),
+ max_tokens: 8192,
+ messages: [{ role: "user", content: userMessage }],
+ system: SUMMARIZATION_PROMPT,
+ })
+
+ const textBlock = response.content.find((block) => block.type === "text")
+ return textBlock?.type === "text" ? textBlock.text : ""
+ } else if (provider === "openai" && openaiClient) {
+ const response = await openaiClient.responses.create({
+ ...getModelProps(testModel.value),
+ instructions: SUMMARIZATION_PROMPT,
+ input: userMessage,
+ })
+
+ return response.output_text || ""
+ }
+
+ throw new Error("No valid client provided for summarization")
+}
+
+export async function compactConversationIfNeeded(
+ conversationHistory: ConversationMessage[],
+ provider: Provider,
+ systemPrompt: string,
+ userMessage: string,
+ setStatusCompacting: () => void,
+ options: {
+ anthropicClient?: Anthropic
+ openaiClient?: OpenAI
+ model?: string
+ } = {},
+): Promise<CompactionResult> {
+ const messages = [
+ ...conversationHistory,
+ {
+ role: "user" as const,
+ content: userMessage,
+ timestamp: Date.now(),
+ } as Omit<ConversationMessage, "id">,
+ ] as [...ConversationMessage[], Omit<ConversationMessage, "id">]
+ // Cheap pre-check: a token never covers less than one character, so if the
+ // character count is below the token threshold, token counting can be skipped.
+ const totalChars =
+ systemPrompt.length + messages.reduce((sum, m) => sum + m.content.length, 0)
+ if (totalChars < COMPACTION_THRESHOLDS[provider]) {
+ return { wasCompacted: false }
+ }
+
+ const tokenMessages = toTokenMessages(messages)
+ const estimatedTokens = await countTokens(
+ provider,
+ tokenMessages,
+ systemPrompt,
+ {
+ anthropicClient: options.anthropicClient,
+ model: options.model,
+ },
+ )
+
+ if (estimatedTokens === -1) {
+ console.error(
+ "Failed to estimate tokens for conversation, using full messages list.",
+ )
+ return {
+ wasCompacted: false,
+ }
+ }
+
+ if (estimatedTokens <= COMPACTION_THRESHOLDS[provider]) {
+ return { wasCompacted: false }
+ }
+
+ if (messages.length < 3) {
+ return {
+ wasCompacted: false,
+ error:
+ "Messages in this conversation are too long to fit the context limit. Please try using shorter messages in a new chat.",
+ }
+ }
+
+ const result = await compactConversationInternal(
+ conversationHistory,
+ provider,
+ setStatusCompacting,
+ options,
+ )
+
+ if (!result.wasCompacted) {
+ return {
+ ...result,
+ error:
+ "Messages in this conversation are too long to fit the context limit. Please try using shorter messages in a new chat.",
+ }
+ }
+
+ return result
+}
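+
+// Hypothetical call site (names are illustrative, not part of this patch):
+//   const compaction = await compactConversationIfNeeded(
+//     history, "anthropic", systemPrompt, userMessage,
+//     () => setStatus(AIOperationStatus.Compacting),
+//     { anthropicClient, model: settings.model },
+//   )
+//   if (!compaction.wasCompacted && "error" in compaction) showError(compaction.error)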
+
+async function compactConversationInternal(
+ messages: ConversationMessage[],
+ provider: Provider,
+ setStatusCompacting: () => void,
+ options: {
+ anthropicClient?: Anthropic
+ openaiClient?: OpenAI
+ model?: string
+ } = {},
+): Promise<CompactionResult> {
+ if (messages.length === 0) {
+ return { wasCompacted: false }
+ }
+
+ setStatusCompacting()
+
+ try {
+ const summary = await generateSummary(
+ messages,
+ provider,
+ options.anthropicClient,
+ options.openaiClient,
+ )
+
+ return {
+ compactedMessage: buildContinuationPrompt(summary),
+ wasCompacted: true,
+ }
+ } catch (error) {
+ console.error("Failed to compact conversation:", error)
+ return {
+ wasCompacted: false,
+ error: "Failed to generate summary for compaction.",
+ }
+ }
+}
diff --git a/src/utils/formatSql.ts b/src/utils/formatSql.ts
index 51cdfb58e..dbbcd0d6c 100644
--- a/src/utils/formatSql.ts
+++ b/src/utils/formatSql.ts
@@ -2,7 +2,7 @@ import { format, FormatOptions } from "sql-formatter"
export const formatSql = (statement: string, options?: FormatOptions) => {
return format(statement, {
- language: "postgresql",
+ language: "mysql",
...options,
})
}
diff --git a/src/utils/hashString.ts b/src/utils/hashString.ts
new file mode 100644
index 000000000..079edd82c
--- /dev/null
+++ b/src/utils/hashString.ts
@@ -0,0 +1,9 @@
+// 31x rolling hash (same scheme as Java's String.hashCode), truncated to
+// 32 bits and rendered as an unsigned base-36 string.
+export const hashString = (str: string): string => {
+ let hash = 0
+ for (let i = 0; i < str.length; i++) {
+ const char = str.charCodeAt(i)
+ hash = (hash << 5) - hash + char // hash * 31 + char
+ hash &= hash // bitwise op coerces the value to a 32-bit integer
+ }
+ return new Uint32Array([hash])[0].toString(36) // reinterpret as unsigned
+}
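+
+// Illustrative usage: equal inputs yield the same short base-36 key, e.g. for
+// keying cached results by query text (the value shown is made up).
+//   hashString("SELECT 1;") // => "k3v9x2"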
diff --git a/src/utils/index.ts b/src/utils/index.ts
index 2b5a234a0..3fdf53168 100644
--- a/src/utils/index.ts
+++ b/src/utils/index.ts
@@ -36,3 +36,4 @@ export * from "./pick"
export * from "./fetchUserLocale"
export * from "./getLocaleFromLanguage"
export * from "./uniq"
+export * from "./hashString"
diff --git a/src/utils/localStorage/types.ts b/src/utils/localStorage/types.ts
index 308d50a6d..3777aa6a6 100644
--- a/src/utils/localStorage/types.ts
+++ b/src/utils/localStorage/types.ts
@@ -39,4 +39,6 @@ export enum StoreKey {
AUTO_REFRESH_TABLES = "auto.refresh.tables",
SSO_USERNAME = "sso.username",
LEFT_PANEL_STATE = "left.panel.state",
+ AI_ASSISTANT_SETTINGS = "ai.assistant.settings",
+ AI_CHAT_PANEL_WIDTH = "ai.chat.panel.width",
}
diff --git a/src/utils/monacoInit.ts b/src/utils/monacoInit.ts
new file mode 100644
index 000000000..865e3fdae
--- /dev/null
+++ b/src/utils/monacoInit.ts
@@ -0,0 +1,19 @@
+import { loader } from "@monaco-editor/react"
+import dracula from "../scenes/Editor/Monaco/dracula"
+import { registerLanguageAddons } from "../scenes/Editor/Monaco/editor-addons"
+
+loader.config({
+ paths: {
+ vs: "assets/vs",
+ },
+})
+
+// This runs once at app startup, before any editor mounts
+export const monacoPromise = loader.init().then((monaco) => {
+ registerLanguageAddons(monaco)
+
+ monaco.editor.defineTheme("dracula", dracula)
+ monaco.editor.setTheme("dracula")
+
+ return monaco
+})
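+
+// Illustrative consumer (not part of this file): await the shared promise so
+// the addons and theme are registered exactly once before an editor mounts.
+//   monacoPromise.then((monaco) => monaco.editor.create(node, { theme: "dracula" }))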
diff --git a/src/utils/questdb/client.ts b/src/utils/questdb/client.ts
index 81f9db7b1..a3693a41d 100644
--- a/src/utils/questdb/client.ts
+++ b/src/utils/questdb/client.ts
@@ -24,6 +24,9 @@ import {
Preferences,
Permission,
SymbolColumnDetails,
+ ValidateQueryResult,
+ ValidateQuerySuccessResult,
+ ValidateQueryErrorResult,
} from "./types"
import { ssoAuthState } from "../../modules/OAuth2/ssoAuthState"
@@ -335,6 +338,27 @@ export class Client {
}
}
+ async validateQuery(query: string): Promise<ValidateQueryResult> {
+ const response = await fetch(
+ `api/v1/sql/validate?${Client.encodeParams({ query })}`,
+ {
+ headers: this.commonHeaders,
+ },
+ )
+ if (response.ok) {
+ return (await response.json()) as ValidateQuerySuccessResult
+ }
+
+ if (response.status === 400 || response.status === 403) {
+ return (await response.json()) as ValidateQueryErrorResult
+ }
+
+ return Promise.reject({
+ status: response.status,
+ statusText: response.statusText,
+ })
+ }
+
async showTables(): Promise<QueryResult<Table>> {
const response = await this.query("tables();")
diff --git a/src/utils/questdb/types.ts b/src/utils/questdb/types.ts
index b3126eedc..3e15a3146 100644
--- a/src/utils/questdb/types.ts
+++ b/src/utils/questdb/types.ts
export type QueryResult<T extends Record<string, any>> =
| DdlResult
| NoticeResult
+type QueryType =
+ | "INSERT"
+ | "TRUNCATE"
+ | "ALTER TABLE"
+ | "SET"
+ | "DROP"
+ | "COPY"
+ | "CREATE TABLE"
+ | "INSERT AS SELECT"
+ | "COPY REMOTE"
+ | "RENAME TABLE"
+ | "REPAIR"
+ | "BACKUP TABLE"
+ | "UPDATE"
+ | "VACUUM"
+ | "BEGIN"
+ | "COMMIT"
+ | "ROLLBACK"
+ | "CREATE AS SELECT"
+ | "CHECKPOINT CREATE"
+ | "CHECKPOINT RELEASE"
+ | "DEALLOCATE"
+ | "EXPLAIN"
+ | "TABLE RESUME"
+
+export type ValidateQuerySuccessResult =
+ | {
+ query: string
+ columns: Array<{
+ name: string
+ type: string
+ dim?: number
+ elemType?: string
+ }>
+ timestamp: number
+ }
+ | {
+ queryType: QueryType
+ }
+
+export type ValidateQueryErrorResult = {
+ query: string
+ position: number
+ error: string
+}
+
+export type ValidateQueryResult =
+ | ValidateQuerySuccessResult
+ | ValidateQueryErrorResult
+
export type PartitionBy = "HOUR" | "DAY" | "WEEK" | "MONTH" | "YEAR" | "NONE"
export type Table = {
diff --git a/src/utils/questdbDocsRetrieval.ts b/src/utils/questdbDocsRetrieval.ts
new file mode 100644
index 000000000..b47038594
--- /dev/null
+++ b/src/utils/questdbDocsRetrieval.ts
@@ -0,0 +1,311 @@
+export type DocCategory =
+ | "functions"
+ | "operators"
+ | "sql"
+ | "concepts"
+ | "schema"
+
+export type ParsedDocItem = {
+ name: string
+ section?: string
+}
+
+/**
+ * Parse a documentation item string into name and optional section
+ * Handles formats like "Window Functions - avg()" or "Window Functions"
+ */
+export function parseDocItem(item: string): ParsedDocItem | null {
+ if (!item || !item.trim()) {
+ return null
+ }
+
+ const parts = item.split(/\s+-\s+/)
+ if (parts.length >= 2) {
+ return {
+ name: parts[0].trim(),
+ section: parts.slice(1).join(" - ").trim(),
+ }
+ }
+
+ return { name: item.trim() }
+}
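+
+// Examples of the two accepted shapes (illustrative):
+//   parseDocItem("Window Functions - avg()") // => { name: "Window Functions", section: "avg()" }
+//   parseDocItem("Window Functions") // => { name: "Window Functions" }
+//   parseDocItem(" ") // => null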
+
+/**
+ * Parse multiple documentation item strings into an array of parsed items
+ */
+export function parseDocItems(
+ items: string[],
+): Array<{ name: string; section?: string }> {
+ return items
+ .map(parseDocItem)
+ .filter((item): item is ParsedDocItem => item !== null)
+}
+
+// Base URL for documentation
+const DOCS_BASE_URL = "https://questdb.com/docs"
+
+// Interface for metadata (no content, includes url)
+export interface DocFileMetadata {
+ path: string
+ title: string
+ headers: string[]
+ url: string
+}
+
+/**
+ * Fetch JSON from URL
+ */
+async function fetchJson<T>(url: string): Promise<T> {
+ const response = await fetch(url)
+ if (!response.ok) {
+ throw new Error(`Failed to fetch ${url}: ${response.statusText}`)
+ }
+ return (await response.json()) as T
+}
+
+/**
+ * Fetch markdown content from URL
+ */
+async function fetchMarkdown(url: string): Promise {
+ const response = await fetch(url)
+ if (!response.ok) {
+ throw new Error(`Failed to fetch ${url}: ${response.statusText}`)
+ }
+ return response.text()
+}
+
+/**
+ * Get the table of contents for all QuestDB documentation
+ */
+export async function getQuestDBTableOfContents(): Promise {
+ const tocUrl = `${DOCS_BASE_URL}/web-console/toc-list.json`
+ const toc = await fetchJson<Record<DocCategory, string[]>>(tocUrl)
+
+ let result = "# QuestDB Documentation Table of Contents\n\n"
+
+ // Functions
+ result += "## Functions\n"
+ result += toc.functions.join(", ") + "\n\n"
+
+ // Operators
+ result += "## Operators\n"
+ result += toc.operators.join(", ") + "\n\n"
+
+ // SQL Keywords
+ result += "## SQL Syntax & Keywords\n"
+ result += toc.sql.join(", ") + "\n\n"
+
+ // Concepts
+ if (toc.concepts) {
+ result += "## Concepts\n"
+ result += toc.concepts.join(", ") + "\n\n"
+ }
+
+ // Schema
+ if (toc.schema) {
+ result += "## Schema\n"
+ result += toc.schema.join(", ") + "\n"
+ }
+
+ return result
+}
+
+/**
+ * Get documentation for specific items
+ */
+export async function getSpecificDocumentation(
+ category: DocCategory,
+ items: string[],
+): Promise {
+ // Fetch metadata for this category
+ const metadataUrl = `${DOCS_BASE_URL}/web-console/${category}-docs.json`
+ const categoryDocs = await fetchJson<DocFileMetadata[]>(metadataUrl)
+
+ if (!categoryDocs) {
+ return `Unknown category: ${category}`
+ }
+
+ const chunks: string[] = []
+ const processedPaths = new Set<string>()
+
+ for (const item of items) {
+ const normalizedItem = item.toLowerCase().replace(/[^a-z0-9_]/g, "_")
+ const parsed = parseDocItem(item)
+ if (!parsed) continue
+
+ const queryTitle = parsed.name
+ const querySection = parsed.section
+ const hasTitleAndSection = !!querySection
+
+ // Find files containing this item
+ for (const file of categoryDocs) {
+ // Handle explicit "Title - Section" lookups
+ if (hasTitleAndSection && queryTitle && querySection) {
+ if (file.title.toLowerCase() === queryTitle.toLowerCase()) {
+ const matchingHeaderFromTitleSection = file.headers.find(
+ (h) =>
+ h.toLowerCase() === querySection.toLowerCase() ||
+ h.toLowerCase().replace(/[^a-z0-9_]/g, "_") ===
+ querySection.toLowerCase().replace(/[^a-z0-9_]/g, "_"),
+ )
+ if (
+ matchingHeaderFromTitleSection &&
+ !processedPaths.has(
+ `${file.path}::${matchingHeaderFromTitleSection}`,
+ )
+ ) {
+ processedPaths.add(
+ `${file.path}::${matchingHeaderFromTitleSection}`,
+ )
+
+ // Fetch the markdown content
+ const content = await fetchMarkdown(file.url)
+ const sectionContent = extractSection(
+ content,
+ matchingHeaderFromTitleSection,
+ )
+ if (sectionContent) {
+ chunks.push(
+ `### ${file.path} - ${matchingHeaderFromTitleSection}\n\n${sectionContent}`,
+ )
+ continue
+ }
+ }
+ }
+ }
+
+ // Check if file name matches
+ const fileKey = file.path
+ .split("/")
+ .pop()
+ ?.replace(".md", "")
+ .replace(/-/g, "_")
+ const hasItemInPath = fileKey === normalizedItem
+
+ // Check if title matches
+ const normalizedTitle = file.title
+ .toLowerCase()
+ .replace(/[^a-z0-9_]/g, "_")
+ const hasItemInTitle =
+ normalizedTitle === normalizedItem ||
+ file.title.toLowerCase() === item.toLowerCase()
+
+ // Check if any header matches
+ const hasItemInHeaders = file.headers.some(
+ (h) =>
+ h.toLowerCase().replace(/[^a-z0-9_]/g, "_") === normalizedItem ||
+ h.toLowerCase() === item.toLowerCase(),
+ )
+
+ if (
+ (hasItemInPath || hasItemInTitle || hasItemInHeaders) &&
+ !processedPaths.has(file.path)
+ ) {
+ processedPaths.add(file.path)
+
+ // Fetch the markdown content
+ const content = await fetchMarkdown(file.url)
+
+ // If looking for a specific function/operator, try to extract just that section
+ const matchingHeader = file.headers.find(
+ (h) =>
+ h.toLowerCase() === item.toLowerCase() ||
+ h.toLowerCase().replace(/[^a-z0-9_]/g, "_") === normalizedItem,
+ )
+
+ if (matchingHeader) {
+ const sectionContent = extractSection(content, matchingHeader)
+ if (sectionContent) {
+ chunks.push(
+ `### ${file.path} - ${matchingHeader}\n\n${sectionContent}`,
+ )
+ continue
+ }
+ }
+
+ // Otherwise include the whole file
+ chunks.push(`### ${file.path}\n\n${content}`)
+ }
+ }
+ }
+
+ if (chunks.length === 0) {
+ return `No documentation found for: ${items.join(", ")}`
+ }
+
+ return chunks.join("\n\n---\n\n")
+}
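+
+// Illustrative call (category and items are examples, not fixed values):
+//   await getSpecificDocumentation("functions", ["Window Functions - avg()"])
+//   // => matching markdown chunks joined by "---", or a "No documentation found" message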
+
+/**
+ * Extract a specific section from markdown content
+ */
+function extractSection(content: string, sectionHeader: string): string | null {
+ const lines = content.split("\n")
+ let inSection = false
+ const sectionContent: string[] = []
+
+ for (let i = 0; i < lines.length; i++) {
+ const line = lines[i]
+
+ // Check if we found the section header
+ if (line === `## ${sectionHeader}`) {
+ inSection = true
+ sectionContent.push(line)
+ } else if (inSection) {
+ // Check if we reached the next section
+ if (line.match(/^##?\s/)) {
+ break
+ }
+ sectionContent.push(line)
+ }
+ }
+
+ return sectionContent.length > 0 ? sectionContent.join("\n") : null
+}
+
+/**
+ * Search for documentation by keyword
+ */
+export async function searchDocumentation(query: string): Promise {
+ const lowerQuery = query.toLowerCase()
+ const results: string[] = []
+
+ // Search in all categories
+ const categories: DocCategory[] = [
+ "functions",
+ "operators",
+ "sql",
+ "concepts",
+ "schema",
+ ]
+
+ for (const category of categories) {
+ const metadataUrl = `${DOCS_BASE_URL}/web-console/${category}-docs.json`
+ const docs = await fetchJson<DocFileMetadata[]>(metadataUrl)
+
+ for (const file of docs) {
+ // Check file name
+ if (file.path.toLowerCase().includes(lowerQuery)) {
+ results.push(`${category}/${file.title}`)
+ }
+
+ // Check headers
+ for (const header of file.headers) {
+ if (header.toLowerCase().includes(lowerQuery)) {
+ results.push(`${category}/${header}`)
+ }
+ }
+ }
+ }
+
+ if (results.length === 0) {
+ return `No results found for: ${query}`
+ }
+
+ return `Found ${results.length} results:\n${results.join("\n")}`
+}
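+
+// e.g. await searchDocumentation("sample by")
+//   // => "Found N results:" followed by "category/title" lines (illustrative)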
+
+export async function getReferenceFull(): Promise {
+ const url = `${DOCS_BASE_URL}/reference-full.md`
+ return fetchMarkdown(url)
+}
diff --git a/src/utils/tokenCounting.ts b/src/utils/tokenCounting.ts
new file mode 100644
index 000000000..2e1bea26d
--- /dev/null
+++ b/src/utils/tokenCounting.ts
@@ -0,0 +1,98 @@
+import Anthropic from "@anthropic-ai/sdk"
+import { encoding_for_model, TiktokenModel } from "tiktoken"
+import type { Provider } from "./aiAssistantSettings"
+
+export interface ConversationMessage {
+ role: "user" | "assistant"
+ content: string
+}
+
+export const CONTEXT_LIMITS: Record<Provider, number> = {
+ anthropic: 200_000,
+ openai: 400_000,
+}
+
+export const COMPACTION_THRESHOLDS: Record<Provider, number> = {
+ anthropic: 150_000,
+ openai: 350_000,
+}
+
+export async function countTokensAnthropic(
+ client: Anthropic,
+ messages: ConversationMessage[],
+ systemPrompt: string,
+ model: string,
+): Promise<number> {
+ const anthropicMessages: Anthropic.MessageParam[] = messages.map((m) => ({
+ role: m.role,
+ content: m.content,
+ }))
+
+ const response = await client.messages.countTokens({
+ model,
+ system: systemPrompt,
+ messages: anthropicMessages,
+ })
+
+ return response.input_tokens
+}
+
+let tiktokenEncoder: ReturnType<typeof encoding_for_model> | null = null