From 4e689dfaa850ef917c4406a72ed52ef3e302d167 Mon Sep 17 00:00:00 2001 From: Nemo Godebski-Pedersen Date: Mon, 24 Mar 2025 15:25:13 +0000 Subject: [PATCH] migrate opey-functions to the pinia chat store --- src/components/ChatMessage.vue | 22 ++- src/models/MessageModel.ts | 55 +++++- src/obp/common-functions.ts | 25 ++- src/obp/opey-functions.ts | 159 ------------------ src/stores/chat.ts | 82 ++++++++- src/test/ChatWidget.test.ts | 12 -- src/test/chat.test.ts | 247 ++++++++++++++++++++++++++- src/test/common-functions.test.ts | 24 +++ src/test/opey-functions.test.ts | 271 ------------------------------ tsconfig.app.json | 2 +- 10 files changed, 435 insertions(+), 464 deletions(-) delete mode 100644 src/obp/opey-functions.ts create mode 100644 src/test/common-functions.test.ts delete mode 100644 src/test/opey-functions.test.ts diff --git a/src/components/ChatMessage.vue b/src/components/ChatMessage.vue index 0dbb1e3..239835d 100644 --- a/src/components/ChatMessage.vue +++ b/src/components/ChatMessage.vue @@ -1,4 +1,4 @@ - diff --git a/src/models/MessageModel.ts b/src/models/MessageModel.ts index 382af80..87dfa8d 100644 --- a/src/models/MessageModel.ts +++ b/src/models/MessageModel.ts @@ -1,6 +1,53 @@ +// Purpose: Define the message models for the chat stream +import { ToolCall } from '@langchain/core/messages' + + +// This is a schema for the raw message that we will get back from the Opey API, +// we adapt it to our own schema in the OpeyMessage interface +export interface RawOpeyMessage { + /** + * Role of the message. + * @example "human", "ai", "tool" + */ + type: "human" | "ai" | "tool"; + + /** + * Content of the message. + * @example "Hello, world!" + */ + content: string; + + /** + * Tool calls in the message. + */ + tool_calls: ToolCall[]; + + /** + * Whether this message is an approval request for a tool call. + */ + tool_approval_request: boolean; + + /** + * Tool call that this message is responding to. + * @example "call_Jja7J89XsjrOLA5r!MEOW!SL" + */ + tool_call_id?: string; + + /** + * Run ID of the message. + * @example "847c6285-8fc9-4560-a83f-4e6285809254" + */ + run_id?: string; + + /** + * Original LangChain message in serialized form. + */ + original?: Record; +} + export interface OpeyMessage { id: string; // i.e. 
UUID4 - role: string; + role: "assistant" | "user" | "tool"; content: string; error?: string; } @@ -13,6 +60,12 @@ export interface AssistantMessage extends OpeyMessage { // Probably we will need some fields here for tool call/ tool call approval requests } +export interface ToolMessage extends OpeyMessage { + pending: boolean; + awaitingApproval: boolean; + toolCall: ToolCall; +} + export interface ChatStreamInput { message: UserMessage; } \ No newline at end of file diff --git a/src/obp/common-functions.ts b/src/obp/common-functions.ts index 3b8f0ba..18ccaf0 100644 --- a/src/obp/common-functions.ts +++ b/src/obp/common-functions.ts @@ -86,16 +86,23 @@ export async function getOpeyJWT() { } export async function getobpConsent() { - await axios.post('/api/opey/consent').catch((error) => { - if (error.response) { - throw new Error(`getobpConsent returned an error: ${error.toJSON()}`); - } else { - throw new Error(`getobpConsent returned an error: ${error.message}`); + // Get consent from the Opey API + try { + const consentResponse = await fetch('/api/opey/consent', { + method: 'POST', + }) + + if (!consentResponse.ok) { + throw new Error(`Failed to get Opey consent: ${consentResponse.statusText}`); } - }).then((response) => { - console.log(response) - return response - }); + + const consent = await consentResponse.json(); + return consent + + } catch (error) { + console.error('Error getting Opey consent:', error); + throw new Error(`${error instanceof Error ? error.message : String(error)}`); + } } export async function answerobpConsentChallenge(answerBody: any) { diff --git a/src/obp/opey-functions.ts b/src/obp/opey-functions.ts deleted file mode 100644 index 8f6bebc..0000000 --- a/src/obp/opey-functions.ts +++ /dev/null @@ -1,159 +0,0 @@ -export interface OpeyMessage { - id: string; // i.e. 
UUID4 - role: string; - content: string; - error?: string; -} - -export interface UserMessage extends OpeyMessage { - isToolCallApproval: boolean; -} - -export interface AssistantMessage extends OpeyMessage { - // Probably we will need some fields here for tool call/ tool call approval requests -} - -export interface OpeyStreamContext { - currentAssistantMessage: AssistantMessage; - messages: OpeyMessage[]; - status: string; -} - -export interface obpConsentObject { - consent_id: string; -} - -async function pushOrUpdateOpeyMessage(currentMessage: OpeyMessage, context: OpeyStreamContext): Promise { - const existingMessage = context.messages.find(m => m.id === currentMessage.id); - if (existingMessage) { - // Update the existing message - existingMessage.content = currentMessage.content; - - } else { - // Add the new message - context.messages.push(currentMessage); - } -} -/** - * Process a stream from Opey API and update the message content - * @param stream The ReadableStream from the fetch response - * @param context The context object containing the message to update and status - * @returns A promise that resolves when the stream is complete - */ -export async function processOpeyStream( - stream: ReadableStream, - context: OpeyStreamContext -): Promise { - const reader = stream.getReader(); - let decoder = new TextDecoder(); - - try { - while (true) { - const { done, value } = await reader.read(); - - if (done) { - console.log('Stream complete'); - context.status = 'ready'; - break; - } - - const decodedValue = decoder.decode(value); - console.debug('Received:', decodedValue); //DEBUG - - // Parse the SSE data format - const lines = decodedValue.split('\n'); - for (const line of lines) { - if (line.startsWith('data: ') && line !== 'data: [DONE]') { - try { - const jsonStr = line.substring(6); // Remove 'data: ' - const data = JSON.parse(jsonStr); - - if (data.type === 'token' && data.content) { - // Append content to the current assistant message - context.currentAssistantMessage.content += data.content; - // Force Vue to detect the change - context.messages = [...context.messages]; - } - } catch (e) { - throw new Error(`Error parsing JSON: ${e}`); - } - } else if (line === 'data: [DONE]') { - // Add the current assistant message to the messages list - // We need to check if the current assistant message is not already in the list, if it is simply update the existing message - await pushOrUpdateOpeyMessage(context.currentAssistantMessage, context); - // Reset the current assistant message - context.currentAssistantMessage = { - id: '', - role: 'assistant', - content: '', - }; - } - } - } - } catch (error) { - console.error('Stream error:', error); - context.status = 'ready'; - throw error - } - -} - -export async function sendOpeyMessage( - message: UserMessage, - threadId: string, - context: OpeyStreamContext -): Promise { - try { - const response = await fetch('/api/opey/stream', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - body: JSON.stringify({ - thread_id: threadId, - message: message.content, - is_tool_call_approval: message.isToolCallApproval - }) - }) - - const stream = response.body; - if (!stream) { - throw new Error('No stream returned from API') - } - - if (response.status !== 200) { - throw new Error(`Error sending Opey message: ${response.statusText}`); - } - - await processOpeyStream(stream, context); - } catch (error) { - console.error('Error sending Opey message:', error); - context.status = 'ready'; - throw new Error(`Error sending Opey 
message: ${error}`); - } - -} - - -export async function getobpConsent(): Promise { - // Get consent from the Opey API - try { - const consentResponse = await fetch('/api/opey/consent', { - method: 'POST', - }) - - if (!consentResponse.ok) { - throw new Error(`Failed to get Opey consent: ${consentResponse.statusText}`); - } - - const consent = await consentResponse.json(); - return consent - - } catch (error) { - console.error('Error getting Opey consent:', error); - throw new Error(`${error instanceof Error ? error.message : String(error)}`); - } - - - -} \ No newline at end of file diff --git a/src/stores/chat.ts b/src/stores/chat.ts index a5501e8..eca6d44 100644 --- a/src/stores/chat.ts +++ b/src/stores/chat.ts @@ -25,9 +25,9 @@ * */ -import type { OpeyMessage, ChatStreamInput } from '@/models/MessageModel' +import type { OpeyMessage, ChatStreamInput, RawOpeyMessage, ToolMessage } from '@/models/MessageModel' import type { Chat } from '@/models/ChatModel' -import { getobpConsent, processOpeyStream } from '@/obp/opey-functions' +import { getobpConsent } from '@/obp/common-functions' import { defineStore } from 'pinia' import { v4 as uuidv4 } from 'uuid' @@ -147,6 +147,10 @@ export const useChat = defineStore('chat', { }, async stream(input: ChatStreamInput): Promise { + + // By this point, if we have not set the thread ID we should do so + this.getThreadId() + // Add user message to chat this.addMessage(input.message) @@ -188,7 +192,7 @@ export const useChat = defineStore('chat', { status: this.status }; - await processOpeyStream(stream, context); + await this._processOpeyStream(stream); } catch (error) { console.error('Error sending Opey message:', error); @@ -199,6 +203,78 @@ export const useChat = defineStore('chat', { this.status = 'ready'; } + }, + + async _processOpeyStream(stream: ReadableStream): Promise { + const reader = stream.getReader(); + let decoder = new TextDecoder(); + + try { + while (true) { + const { done, value } = await reader.read(); + + if (done) { + console.log('Stream complete'); + this.status = 'ready'; + break; + } + + const decodedValue = decoder.decode(value); + console.debug('Received:', decodedValue); //DEBUG + + // Parse the SSE data format + const lines = decodedValue.split('\n'); + for (const line of lines) { + if (line.startsWith('data: ') && line !== 'data: [DONE]') { + try { + const jsonStr = line.substring(6); // Remove 'data: ' + const data = JSON.parse(jsonStr); + const content: RawOpeyMessage = data.content; + // This is where we process different types of messages from Opey by their 'type' field + // Process pending tool calls + if (data.type === 'message') { + console.log("Tool Calls: ", content) + for (const toolCall of content.tool_calls) { + + const toolMessage: ToolMessage = { + pending: true, + id: uuidv4(), + role: 'tool', + content: '', + awaitingApproval: false, + toolCall: toolCall + } + + this.addMessage(toolMessage) + } + } + if (data.type === 'token' && data.content) { + // Append content to the current assistant message + this.currentAssistantMessage.content += data.content; + // Force Vue to detect the change + this.messages = [...this.messages]; + } + } catch (e) { + throw new Error(`Error parsing JSON: ${e}`); + } + } else if (line === 'data: [DONE]') { + // Add the current assistant message to the messages list + // We need to check if the current assistant message is not already in the list, if it is simply update the existing message + await this.addMessage(this.currentAssistantMessage); + // Reset the current assistant 
message + this.currentAssistantMessage = { + id: '', + role: 'assistant', + content: '', + }; + } + } + } + } catch (error) { + console.error('Stream error:', error); + this.status = 'ready'; + throw error + } } } diff --git a/src/test/ChatWidget.test.ts b/src/test/ChatWidget.test.ts index ca41c33..631e273 100644 --- a/src/test/ChatWidget.test.ts +++ b/src/test/ChatWidget.test.ts @@ -1,27 +1,15 @@ import { mount } from '@vue/test-utils'; import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; import ChatWidget from '../components/ChatWidget.vue' -import { OpeyStreamContext } from '@/obp/opey-functions'; import { setActivePinia, createPinia } from 'pinia'; describe('ChatWidget', () => { - let mockContext: OpeyStreamContext; beforeEach(() => { // Init Pinia Store setActivePinia(createPinia()) - mockContext = { - currentAssistantMessage: { - id: '123', - role: 'assistant', - content: '', - }, - messages: [], - status: 'loading', - } - // create a mock stream const mockStream = new ReadableStream({ start(controller) { diff --git a/src/test/chat.test.ts b/src/test/chat.test.ts index 5810c93..997ccf2 100644 --- a/src/test/chat.test.ts +++ b/src/test/chat.test.ts @@ -1,5 +1,6 @@ // Tesing the Pinia chat store in src/stores/chat.ts - +import type { OpeyMessage, ToolMessage } from '@/models/MessageModel' +import { ToolCall } from '@langchain/core/messages' import { useChat } from '@/stores/chat' import { beforeEach, describe, it, expect, vi } from 'vitest' import { setActivePinia, createPinia } from 'pinia' @@ -34,6 +35,18 @@ describe('Chat Store', () => { expect(newThreadId).toBe('1234') }) + it('should set its own thread ID if stream is called without a thread ID set already', async () => { + const chatStore = useChat() + await chatStore.stream({message: { + content: 'Hello Opey', + role: 'user', + id: '123', + isToolCallApproval: false + }}) + expect(chatStore.threadId).toBeDefined() + expect(chatStore.threadId).not.toBe('') + }) + it('should apply an error state to the assistant message on error', async () => { // mock the fetch function with a rejected promise global.fetch = vi.fn(() => @@ -91,4 +104,236 @@ describe('Chat Store', () => { }, }); }) +}) + +describe('Chat Store _proccessOpeyStream', () => { + let mockStream: ReadableStream + + let chatStore: ReturnType + + beforeEach(() => { + // Set the active Pinia store + setActivePinia(createPinia()) + chatStore = useChat() + // create a mock stream + mockStream = new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"Hello"}\n`)); + controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":" world!"}\n`)); + controller.close(); + } + }) + }) + + it('should update context with streamed content', async () => { + + + // Mock a ReadableStream + const mockAsisstantMessage = "Hi I'm Opey, your personal banking assistant. I'll certainly not take over the world, no, not at all!" 
+ + // Split the message into chunks, but reappend the whitespace (this is to simulate llm tokens) + const mockMessageChunks = mockAsisstantMessage.split(" ") + for (let i = 0; i < mockMessageChunks.length; i++) { + // Don't add whitespace to the last chunk + if (i === mockMessageChunks.length - 1 ) { + mockMessageChunks[i] = `${mockMessageChunks[i]}` + break + } + mockMessageChunks[i] = `${mockMessageChunks[i]} ` + } + + // Fake the token stream + const stream = new ReadableStream({ + start(controller) { + for (let i = 0; i < mockMessageChunks.length; i++) { + controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"${mockMessageChunks[i]}"}\n`)); + } + controller.close(); + }, + }); + + await chatStore._processOpeyStream(stream) + console.log(chatStore.currentAssistantMessage.content) + expect(chatStore.currentAssistantMessage.content).toBe(mockAsisstantMessage) + }) + + it('should throw an error when the stream is closed by the server', async () => { + const brokenStream = new ReadableStream({ + start(controller) { + for (let i = 0; i < 10; i++) { + if (i === 5) { + controller.error(new Error('Stream closed by server')) + break; + } + controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"test"}\n`)); + } + + }, + }); + + await expect(chatStore._processOpeyStream(brokenStream)) + .rejects + .toThrow('Stream closed by server') + }) + + it('should be able a chunk with type: message and a tool call in the body', async () => { + // create a mock stream + const mockStream = new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode(`data: {"type": "message", "content": {"type": "ai", "content": "", "tool_calls": [{"name": "retrieve_glossary", "args": {"question": "hre"}, "id": "call_XsmUpPIeS81l9MYpieBZtr4w", "type": "tool_call"}], "tool_approval_request": false, "tool_call_id": null, "run_id": "d0c2bcbe-62f7-464b-8564-bf9263939fe1", "original": {"type": "ai", "data": {"content": "", "additional_kwargs": {"tool_calls": [{"index": 0, "id": "call_XsmUpPIeS81l9MYpieBZtr4w", "function": {"arguments": "{\\"question\\":\\"hre\\"}", "name": "retrieve_glossary"}, "type": "function"}]}, "response_metadata": {"finish_reason": "tool_calls", "model_name": "gpt-4o-2024-08-06", "system_fingerprint": "fp_eb9dce56a8"}, "type": "ai", "name": null, "id": "run-5bb065b9-440d-4678-bbdb-cd6de94a78d3", "example": false, "tool_calls": [{"name": "retrieve_glossary", "args": {"question": "hre"}, "id": "call_XsmUpPIeS81l9MYpieBZtr4w", "type": "tool_call"}], "invalid_tool_calls": [], "usage_metadata": null}}}}\n`)); + controller.close(); + }, + }); + + // mock the fetch function + global.fetch = vi.fn(() => + Promise.resolve(new Response(mockStream, { + headers: { 'content-type': 'text/event-stream' }, + status: 200, + })) + ); + + await chatStore._processOpeyStream(mockStream) + + expect(chatStore.messages).toHaveLength(1) + + const toolMessage: ToolMessage = chatStore.messages[0] as ToolMessage + + expect(toolMessage.awaitingApproval).toBe(false) + expect(toolMessage.toolCall).toBeTypeOf('object') + expect(toolMessage.pending).toBe(true) + // Instead of checking instance directly, verify the object has the expected properties + expect(toolMessage.toolCall).toEqual(expect.objectContaining({ + name: 'retrieve_glossary', + args: expect.objectContaining({ + question: 'hre' + }), + id: 'call_XsmUpPIeS81l9MYpieBZtr4w', + type: 'tool_call' + })) + expect(toolMessage.content).toBe('') + }) + + it('should throw an error when the chunk is not valid 
json', async () => { + const invalidJsonStream = new ReadableStream({ + start(controller) { + for (let i=0; i<10; i++) { + controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"test"}\n`)); + if (i === 5) { + controller.enqueue(new TextEncoder().encode('data: "type":"token","content":"test"}\n')); + } + } + controller.close(); + + } + }) + + await expect(chatStore._processOpeyStream(invalidJsonStream)) + .rejects + .toThrowError() + }) + + it("should set status to 'ready' when completed", async () => { + const stream = new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"test"}\n`)); + controller.close(); + } + }) + + await chatStore._processOpeyStream(stream) + expect(chatStore.status).toBe('ready') + }) + + it("should clear the placeholder assistant message, and update last assistant message when recieving the [DONE] signal", async () => { + // Mock a ReadableStream + const mockAsisstantMessage = "Hi I'm Opey, your personal banking assistant. I'll certainly not take over the world, no, not at all!" + // Split the message into chunks, but reappend the whitespace (this is to simulate llm tokens) + const mockMessageChunks = mockAsisstantMessage.split(" ") + for (let i = 0; i < mockMessageChunks.length; i++) { + // Don't add whitespace to the last chunk + if (i === mockMessageChunks.length - 1 ) { + mockMessageChunks[i] = `${mockMessageChunks[i]}` + break + } + mockMessageChunks[i] = `${mockMessageChunks[i]} ` + } + + // Fake the token stream + const stream = new ReadableStream({ + start(controller) { + for (let i = 0; i < mockMessageChunks.length; i++) { + controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"${mockMessageChunks[i]}"}\n`)); + } + controller.enqueue(new TextEncoder().encode(`data: [DONE]\n`)); + controller.close(); + }, + }); + + // Replace current assistant message with a more unique one for our test + chatStore.currentAssistantMessage = { + id: '456', + role: 'assistant', + content: '', + } + + // Push assistant message to the messages list as this is what we do in the ChatWidget to visualise token streaming + chatStore.addMessage(chatStore.currentAssistantMessage) + + await chatStore._processOpeyStream(stream) + // assert that the current assistant 'placeholder' message was reset + expect(chatStore.currentAssistantMessage.content).toBe('') + // assert that the assistant message was added to the messages list + console.log(chatStore.messages) + expect(chatStore.messages).toContainEqual({ + id: '456', + role: 'assistant', + content: mockAsisstantMessage, + }) + + + }) + it("should have a unique set of messages", async () => { + // mock the stream as above + // Mock a ReadableStream + const mockAsisstantMessage = "Hi I'm Opey, your personal banking assistant. I'll certainly not take over the world, no, not at all!" 
+ // Split the message into chunks, but reappend the whitespace (this is to simulate llm tokens) + const mockMessageChunks = mockAsisstantMessage.split(" ") + for (let i = 0; i < mockMessageChunks.length; i++) { + // Don't add whitespace to the last chunk + if (i === mockMessageChunks.length - 1 ) { + mockMessageChunks[i] = `${mockMessageChunks[i]}` + break + } + mockMessageChunks[i] = `${mockMessageChunks[i]} ` + } + + // Fake the token stream + const stream = new ReadableStream({ + start(controller) { + for (let i = 0; i < mockMessageChunks.length; i++) { + controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"${mockMessageChunks[i]}"}\n`)); + } + controller.enqueue(new TextEncoder().encode(`data: [DONE]\n`)); + controller.close(); + }, + }); + + // Replace current assistant message with a more unique one for our test + chatStore.currentAssistantMessage = { + id: '456', + role: 'assistant', + content: '', + } + + // Push assistant message to the messages list as this is what we do in the ChatWidget to visualise token streaming + chatStore.addMessage(chatStore.currentAssistantMessage) + + await chatStore._processOpeyStream(stream) + + function hasUniqueValues(arr: OpeyMessage[]): boolean { + return arr.filter((value, index, self) => self.indexOf(value) === index).length === arr.length; + } + expect(hasUniqueValues(chatStore.messages)).toBe(true) + }) }) \ No newline at end of file diff --git a/src/test/common-functions.test.ts b/src/test/common-functions.test.ts new file mode 100644 index 0000000..46aee53 --- /dev/null +++ b/src/test/common-functions.test.ts @@ -0,0 +1,24 @@ +import { describe, vi, expect, it, beforeEach } from 'vitest' +import { getobpConsent } from '@/obp/common-functions'; + +describe('getobpConsent', () => { + + beforeEach(() => { + global.fetch = vi.fn(() => + Promise.resolve(new Response(JSON.stringify({consent_id: 1234}), { + headers: { 'content-type': 'application/json' }, + status: 200, + })) + ); + }) + + it('should call fetch', async () => { + await getobpConsent() + expect(global.fetch).toHaveBeenCalled() + }) + + it('should return a consent id', async () => { + const consentId = await getobpConsent() + expect(consentId).toStrictEqual({consent_id: 1234}) + }) +}) \ No newline at end of file diff --git a/src/test/opey-functions.test.ts b/src/test/opey-functions.test.ts deleted file mode 100644 index f5aff76..0000000 --- a/src/test/opey-functions.test.ts +++ /dev/null @@ -1,271 +0,0 @@ -import * as OpeyModule from '@/obp/opey-functions'; -import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; - -describe('processOpeyStream', async () => { - let mockContext: OpeyModule.OpeyStreamContext; - - beforeEach(() => { - // Reset the mock context before each test - mockContext = { - currentAssistantMessage: { - id: '123', - role: 'assistant', - content: '', - }, - messages: [], - status: 'loading', - } - }) - it('should update context with streamed content', async () => { - // Mock a ReadableStream - const mockAsisstantMessage = "Hi I'm Opey, your personal banking assistant. I'll certainly not take over the world, no, not at all!" 
- - // Split the message into chunks, but reappend the whitespace (this is to simulate llm tokens) - const mockMessageChunks = mockAsisstantMessage.split(" ") - for (let i = 0; i < mockMessageChunks.length; i++) { - // Don't add whitespace to the last chunk - if (i === mockMessageChunks.length - 1 ) { - mockMessageChunks[i] = `${mockMessageChunks[i]}` - break - } - mockMessageChunks[i] = `${mockMessageChunks[i]} ` - } - - // Fake the token stream - const stream = new ReadableStream({ - start(controller) { - for (let i = 0; i < mockMessageChunks.length; i++) { - controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"${mockMessageChunks[i]}"}\n`)); - } - controller.close(); - }, - }); - - await OpeyModule.processOpeyStream(stream, mockContext) - console.log(mockContext.currentAssistantMessage.content) - expect(mockContext.currentAssistantMessage.content).toBe(mockAsisstantMessage) - }) - - it('should throw an error when the stream is closed by the server', async () => { - const brokenStream = new ReadableStream({ - start(controller) { - for (let i = 0; i < 10; i++) { - if (i === 5) { - controller.error(new Error('Stream closed by server')) - break; - } - controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"test"}\n`)); - } - - }, - }); - - await expect(OpeyModule.processOpeyStream(brokenStream, mockContext)) - .rejects - .toThrow('Stream closed by server') - }) - - it('should be able to handle empty content', async () => { - - }) - - it('should throw an error when the chunk is not valid json', async () => { - const invalidJsonStream = new ReadableStream({ - start(controller) { - for (let i=0; i<10; i++) { - controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"test"}\n`)); - if (i === 5) { - controller.enqueue(new TextEncoder().encode('data: "type":"token","content":"test"}\n')); - } - } - controller.close(); - - } - }) - - await expect(OpeyModule.processOpeyStream(invalidJsonStream, mockContext)) - .rejects - .toThrowError() - }) - - it("should set status to 'ready' when completed", async () => { - const stream = new ReadableStream({ - start(controller) { - controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"test"}\n`)); - controller.close(); - } - }) - - await OpeyModule.processOpeyStream(stream, mockContext) - expect(mockContext.status).toBe('ready') - }) - - it("should clear the placeholder assistant message, and update last assistant message when recieving the [DONE] signal", async () => { - // Mock a ReadableStream - const mockAsisstantMessage = "Hi I'm Opey, your personal banking assistant. I'll certainly not take over the world, no, not at all!" 
- // Split the message into chunks, but reappend the whitespace (this is to simulate llm tokens) - const mockMessageChunks = mockAsisstantMessage.split(" ") - for (let i = 0; i < mockMessageChunks.length; i++) { - // Don't add whitespace to the last chunk - if (i === mockMessageChunks.length - 1 ) { - mockMessageChunks[i] = `${mockMessageChunks[i]}` - break - } - mockMessageChunks[i] = `${mockMessageChunks[i]} ` - } - - // Fake the token stream - const stream = new ReadableStream({ - start(controller) { - for (let i = 0; i < mockMessageChunks.length; i++) { - controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"${mockMessageChunks[i]}"}\n`)); - } - controller.enqueue(new TextEncoder().encode(`data: [DONE]\n`)); - controller.close(); - }, - }); - - // Replace current assistant message with a more unique one for our test - mockContext.currentAssistantMessage = { - id: '456', - role: 'assistant', - content: '', - } - - // Push assistant message to the messages list as this is what we do in the ChatWidget to visualise token streaming - mockContext.messages.push(mockContext.currentAssistantMessage) - - await OpeyModule.processOpeyStream(stream, mockContext) - // assert that the current assistant 'placeholder' message was reset - expect(mockContext.currentAssistantMessage.content).toBe('') - // assert that the assistant message was added to the messages list - console.log(mockContext.messages) - expect(mockContext.messages).toContainEqual({ - id: '456', - role: 'assistant', - content: mockAsisstantMessage, - }) - - - }) - it("should have a unique set of messages", async () => { - // mock the stream as above - // Mock a ReadableStream - const mockAsisstantMessage = "Hi I'm Opey, your personal banking assistant. I'll certainly not take over the world, no, not at all!" 
- // Split the message into chunks, but reappend the whitespace (this is to simulate llm tokens) - const mockMessageChunks = mockAsisstantMessage.split(" ") - for (let i = 0; i < mockMessageChunks.length; i++) { - // Don't add whitespace to the last chunk - if (i === mockMessageChunks.length - 1 ) { - mockMessageChunks[i] = `${mockMessageChunks[i]}` - break - } - mockMessageChunks[i] = `${mockMessageChunks[i]} ` - } - - // Fake the token stream - const stream = new ReadableStream({ - start(controller) { - for (let i = 0; i < mockMessageChunks.length; i++) { - controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"${mockMessageChunks[i]}"}\n`)); - } - controller.enqueue(new TextEncoder().encode(`data: [DONE]\n`)); - controller.close(); - }, - }); - - // Replace current assistant message with a more unique one for our test - mockContext.currentAssistantMessage = { - id: '456', - role: 'assistant', - content: '', - } - - // Push assistant message to the messages list as this is what we do in the ChatWidget to visualise token streaming - mockContext.messages.push(mockContext.currentAssistantMessage) - - await OpeyModule.processOpeyStream(stream, mockContext) - - function hasUniqueValues(arr: OpeyModule.OpeyMessage[]): boolean { - return arr.filter((value, index, self) => self.indexOf(value) === index).length === arr.length; - } - expect(hasUniqueValues(mockContext.messages)).toBe(true) - }) - -}) - -describe('sendOpeyMessage', () => { - let mockContext: OpeyModule.OpeyStreamContext; - let testUserMessage: OpeyModule.UserMessage; - - beforeEach(() => { - mockContext = { - currentAssistantMessage: { - id: '123', - role: 'assistant', - content: '', - }, - messages: [], - status: 'loading', - } - - // create a mock stream - const mockStream = new ReadableStream({ - start(controller) { - controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"test"}\n`)); - controller.close(); - }, - }); - - // mock the fetch function - global.fetch = vi.fn(() => - Promise.resolve(new Response(mockStream, { - headers: { 'content-type': 'text/event-stream' }, - status: 200, - })) - ); - - testUserMessage = { - id: '123', - role: 'user', - content: 'test message', - isToolCallApproval: false, - } - }) - afterEach(() => { - vi.clearAllMocks() - }) - it('should call fetch', async () => { - await OpeyModule.sendOpeyMessage(testUserMessage, '123', mockContext) - - expect(global.fetch).toHaveBeenCalled() - }) - it("should push the 'ready' status to the context after success", async () => { - - await OpeyModule.sendOpeyMessage(testUserMessage, '123', mockContext) - - expect(mockContext.status).toBe('ready') - }) -}) - -describe('getobpConsent', () => { - - beforeEach(() => { - global.fetch = vi.fn(() => - Promise.resolve(new Response(JSON.stringify({consent_id: 1234}), { - headers: { 'content-type': 'application/json' }, - status: 200, - })) - ); - }) - - it('should call fetch', async () => { - await OpeyModule.getobpConsent() - expect(global.fetch).toHaveBeenCalled() - }) - - it('should return a consent id', async () => { - const consentId = await OpeyModule.getobpConsent() - expect(consentId).toStrictEqual({consent_id: 1234}) - }) -}) \ No newline at end of file diff --git a/tsconfig.app.json b/tsconfig.app.json index 361e1f0..745113c 100644 --- a/tsconfig.app.json +++ b/tsconfig.app.json @@ -1,6 +1,6 @@ { "extends": "@vue/tsconfig/tsconfig.web.json", - "include": ["env.d.ts", "src/**/*", "src/**/*.vue"], + "include": ["env.d.ts", "src/**/*", "src/**/*.vue", 
"test/integration.test.ts"], "exclude": ["src/**/__tests__/*"], "compilerOptions": { "esModuleInterop": true,