13 changes: 12 additions & 1 deletion packages/frontend/@n8n/i18n/src/locales/en.json
@@ -411,7 +411,18 @@
"chatHub.message.actions.executionId": "Execution ID",
"chatHub.message.edit.cancel": "Cancel",
"chatHub.message.edit.send": "Send",
"chatHub.message.error.unknown": "Error: Unknown error occurred",
"chatHub.message.error.unknown": "Something went wrong. Please try again.",
"chatHub.error.payloadTooLarge": "Your message is too large. Try using shorter text or fewer files.",
"chatHub.error.badRequest": "Something went wrong with your message. Please try again.",
"chatHub.error.forbidden": "You don't have permission to do this. Please check your settings.",
"chatHub.error.serverError": "We're having trouble right now. Please try again in a moment.",
"chatHub.error.serverErrorWithReason": "Something went wrong: {error}",
"chatHub.error.unknown": "Something went wrong. Please try again.",
"chatHub.error.noConnection": "Can't connect right now. Check your internet connection and try again.",
"chatHub.error.fetchConversationFailed": "Couldn't load this conversation",
"chatHub.error.sendMessageFailed": "Couldn't send the message",
"chatHub.error.updateModelFailed": "Couldn't change the AI model",
"chatHub.error.updateToolsFailed": "Couldn't update tools",
"chatHub.models.selector.defaultLabel": "Select model",
"chatHub.models.byIdSelector.title": "Choose {provider} model by ID",
"chatHub.models.byIdSelector.choose": "Enter model identifier (e.g. \"gpt-4\")",
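These keys are consumed through baseText from @n8n/i18n, with the {error} placeholder in serverErrorWithReason filled via the interpolate option. A minimal usage sketch, mirroring the calls that appear in chat.store.ts further down (the example message string is illustrative, not from the diff):

import { useI18n } from '@n8n/i18n';

const i18n = useI18n();

// Plain key lookup
const offline = i18n.baseText('chatHub.error.noConnection');

// Key with an interpolated {error} placeholder
const withReason = i18n.baseText('chatHub.error.serverErrorWithReason', {
	interpolate: { error: 'model unavailable' },
});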
49 changes: 47 additions & 2 deletions packages/frontend/@n8n/rest-api-client/src/utils.test.ts
@@ -54,7 +54,7 @@ describe('streamRequest', () => {
expect(onErrorMock).not.toHaveBeenCalled();
});

-	it('should stream error response from the API endpoint', async () => {
+	it('should stream error response with error data from the API endpoint', async () => {
const testError = { code: 500, message: 'Error happened' };
const encoder = new TextEncoder();
const mockResponse = new ReadableStream({
@@ -66,6 +66,8 @@

const mockFetch = vi.fn().mockResolvedValue({
ok: false,
+			status: 500,
+			statusText: 'Internal Server Error',
body: mockResponse,
});

@@ -98,8 +100,51 @@
});

expect(onChunkMock).not.toHaveBeenCalled();
-		expect(onErrorMock).toHaveBeenCalledTimes(1);
-		expect(onErrorMock).toHaveBeenCalledWith(new ResponseError(testError.message));
+		expect(onDoneMock).not.toHaveBeenCalled();
+		expect(onErrorMock).toHaveBeenCalledExactlyOnceWith(
+			new ResponseError(testError.message, { httpStatusCode: 500 }),
+		);
});

+	it('should call onError when stream ends immediately with non-ok status and no chunks', async () => {
+		const mockResponse = new ReadableStream({
+			start(controller) {
+				// Empty stream that just closes without sending any chunks
+				controller.close();
+			},
+		});
+
+		const mockFetch = vi.fn().mockResolvedValue({
+			ok: false,
+			status: 403,
+			statusText: 'Forbidden',
+			body: mockResponse,
+		});
+
+		global.fetch = mockFetch;
+
+		const onChunkMock = vi.fn();
+		const onDoneMock = vi.fn();
+		const onErrorMock = vi.fn();
+
+		await streamRequest(
+			{
+				baseUrl: 'https://api.example.com',
+				pushRef: '',
+			},
+			'/data',
+			{ key: 'value' },
+			onChunkMock,
+			onDoneMock,
+			onErrorMock,
+		);
+
+		expect(onChunkMock).not.toHaveBeenCalled();
+		expect(onDoneMock).not.toHaveBeenCalled();
+		expect(onErrorMock).toHaveBeenCalledExactlyOnceWith(
+			new ResponseError('Forbidden', { httpStatusCode: 403 }),
+		);
+	});

it('should handle broken stream data', async () => {
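Note that toHaveBeenCalledExactlyOnceWith collapses the pair of assertions it replaces into a single matcher, which also guards against the double-invocation bug addressed in utils.ts below. Roughly equivalent, as a sketch:

// Old style: two assertions
expect(onErrorMock).toHaveBeenCalledTimes(1);
expect(onErrorMock).toHaveBeenCalledWith(new ResponseError('Forbidden', { httpStatusCode: 403 }));

// New style: one matcher asserting both the call count and the arguments
expect(onErrorMock).toHaveBeenCalledExactlyOnceWith(
	new ResponseError('Forbidden', { httpStatusCode: 403 }),
);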
24 changes: 18 additions & 6 deletions packages/frontend/@n8n/rest-api-client/src/utils.ts
@@ -234,6 +234,10 @@ export async function streamRequest<T extends object>(
separator = STREAM_SEPARATOR,
abortSignal?: AbortSignal,
): Promise<void> {
+	let onErrorOnce: ((e: Error) => void) | undefined = (e: Error) => {
+		onErrorOnce = undefined;
+		onError?.(e);
+	};
const headers: Record<string, string> = {
'browser-id': getBrowserId(),
'Content-Type': 'application/json',
@@ -258,7 +262,15 @@
async function readStream() {
const { done, value } = await reader.read();
if (done) {
-			onDone?.();
+			if (response.ok) {
+				onDone?.();
+			} else {
+				onErrorOnce?.(
+					new ResponseError(response.statusText, {
+						httpStatusCode: response.status,
+					}),
+				);
+			}
return;
}
const chunk = decoder.decode(value);
@@ -286,15 +298,15 @@
} else {
// Otherwise, call error callback
const message = 'message' in data ? data.message : response.statusText;
-					onError?.(
+					onErrorOnce?.(
new ResponseError(String(message), {
httpStatusCode: response.status,
}),
);
}
} catch (e: unknown) {
if (e instanceof Error) {
-					onError?.(e);
+					onErrorOnce?.(e);
}
}
}
@@ -304,11 +316,11 @@

// Start reading the stream
await readStream();
-		} else if (onError) {
-			onError(new Error(response.statusText));
+		} else if (onErrorOnce) {
+			onErrorOnce(new Error(response.statusText));
}
} catch (e: unknown) {
assert(e instanceof Error);
-		onError?.(e);
+		onErrorOnce?.(e);
}
}
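The core of the utils.ts change is a self-clearing callback reference, so onError fires at most once even though three failure paths (a non-ok status at stream end, a parse error mid-stream, and the outer catch) can all reach it. A minimal sketch of the pattern in isolation (makeOnceCallback and report are hypothetical names, not part of the diff):

// Wraps an error callback so only the first invocation is delivered.
function makeOnceCallback(onError?: (e: Error) => void) {
	let once: ((e: Error) => void) | undefined = (e: Error) => {
		once = undefined; // self-clear: later calls become no-ops
		onError?.(e);
	};
	return (e: Error) => once?.(e);
}

const report = makeOnceCallback((e) => console.error('reported:', e.message));
report(new Error('stream ended with status 500')); // delivered
report(new Error('late parse failure')); // swallowed

The next set of hunks swaps hard-coded toast strings in the chat UI for the new locale keys.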
@@ -322,7 +322,7 @@ watch(
try {
await chatStore.fetchMessages(id);
} catch (error) {
-			toast.showError(error, 'Error fetching a conversation');
+			toast.showError(error, i18n.baseText('chatHub.error.fetchConversationFailed'));
await router.push({ name: CHAT_VIEW });
}
}
@@ -469,7 +469,7 @@ async function handleSelectModel(selection: ChatHubConversationModel, displayNam
try {
await chatStore.updateSessionModel(sessionId.value, selection, agentName);
} catch (error) {
-			toast.showError(error, 'Could not update selected model');
+			toast.showError(error, i18n.baseText('chatHub.error.updateModelFailed'));
}
} else {
defaultModel.value = { ...selection, cachedDisplayName: agentName };
@@ -503,7 +503,7 @@ async function handleUpdateTools(newTools: INode[]) {
try {
await chatStore.updateToolsInSession(sessionId.value, newTools);
} catch (error) {
-			toast.showError(error, 'Could not update selected tools');
+			toast.showError(error, i18n.baseText('chatHub.error.updateToolsFailed'));
}
}
}
41 changes: 28 additions & 13 deletions packages/frontend/editor-ui/src/features/ai/chatHub/chat.store.ts
@@ -2,6 +2,7 @@ import { defineStore } from 'pinia';
import { CHAT_STORE } from './constants';
import { computed, ref } from 'vue';
import { v4 as uuidv4 } from 'uuid';
+import { useI18n } from '@n8n/i18n';
import {
fetchChatModelsApi,
sendMessageApi,
@@ -58,11 +59,13 @@ import { useTelemetry } from '@/app/composables/useTelemetry';
import { deepCopy, type INode } from 'n8n-workflow';
import type { ChatHubLLMProvider, ChatProviderSettingsDto } from '@n8n/api-types';
import { convertFileToBinaryData } from '@/app/utils/fileUtils';
+import { ResponseError } from '@n8n/rest-api-client';

export const useChatStore = defineStore(CHAT_STORE, () => {
const rootStore = useRootStore();
const toast = useToast();
const telemetry = useTelemetry();
+	const i18n = useI18n();

const agents = ref<ChatModelsResponse>();
const customAgents = ref<Partial<Record<string, ChatHubAgentDto>>>({});
@@ -455,24 +458,36 @@ export const useChatStore = defineStore(CHAT_STORE, () => {
return;
}

-			toast.showError(error, 'Could not send message');
+			const cause =
+				error instanceof ResponseError
+					? new Error(getErrorMessageByStatusCode(error.httpStatusCode, error.message))
+					: error.message.includes('Failed to fetch')
+						? new Error(i18n.baseText('chatHub.error.noConnection'))
+						: error;

-			const { sessionId } = streaming.value;
+			toast.showError(cause, i18n.baseText('chatHub.error.sendMessageFailed'));

			streaming.value = undefined;

-			const conversation = getConversation(sessionId);
-			if (!conversation) {
-				return;
-			}
-
-			// TODO: Not sure if we want to mark all running messages as errored?
-			for (const messageId of conversation.activeMessageChain) {
-				const message = conversation.messages[messageId];
-				if (message.status === 'running') {
-					updateMessage(sessionId, messageId, 'error');
-				}
-			}
		}

+	function getErrorMessageByStatusCode(
+		statusCode: number | undefined,
+		message: string | undefined,
+	): string {
+		const errorMessages: Record<number, string> = {
+			[413]: i18n.baseText('chatHub.error.payloadTooLarge'),
+			[400]: i18n.baseText('chatHub.error.badRequest'),
+			[403]: i18n.baseText('chatHub.error.forbidden'),
+			[500]: message
+				? i18n.baseText('chatHub.error.serverErrorWithReason', {
+						interpolate: { error: message },
+					})
+				: i18n.baseText('chatHub.error.serverError'),
+		};
+
+		return (
+			(statusCode && errorMessages[statusCode]) || message || i18n.baseText('chatHub.error.unknown')
+		);
+	}

async function sendMessage(
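Taken together: streamRequest now surfaces a ResponseError carrying httpStatusCode, and the store maps it onto friendly copy before toasting. A condensed, standalone model of that fallback chain, where a known status wins, then the raw server message, then the generic string. It assumes the English locale entries added above and is not the store's actual code:

const friendly: Record<number, string> = {
	413: 'Your message is too large. Try using shorter text or fewer files.',
	400: 'Something went wrong with your message. Please try again.',
	403: "You don't have permission to do this. Please check your settings.",
};

function toUserMessage(statusCode?: number, serverMessage?: string): string {
	return (
		(statusCode && friendly[statusCode]) ||
		serverMessage ||
		'Something went wrong. Please try again.'
	);
}

console.log(toUserMessage(413)); // payload-too-large copy
console.log(toUserMessage(418, 'teapot')); // unknown status: falls back to the server message
console.log(toUserMessage()); // generic fallback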