mirror of https://github.com/OpenBankProject/API-Explorer-II.git
synced 2026-02-06 10:47:04 +00:00

migrate opey-functions to the pinia chat store

This commit is contained in:
parent ceb6f5c485
commit 4e689dfaa8
@@ -1,4 +1,4 @@
-<script>
+<script lang="ts">
 
 import MarkdownIt from "markdown-it";
 
@@ -22,6 +22,7 @@ import 'prismjs/components/prism-http';
 import 'prismjs/themes/prism-okaidia.css';
 
 import { Warning } from '@element-plus/icons-vue'
+import type { ToolMessage } from "@/models/MessageModel";
 
 export default {
   props: {
@@ -37,7 +38,7 @@ export default {
     }
   },
   methods: {
-    highlightCode(content, language) {
+    highlightCode(content: string, language: string) {
       if (Prism.languages[language]) {
         return Prism.highlight(content, Prism.languages[language], language);
       } else {
@@ -46,9 +47,9 @@ export default {
         return content;
       }
     },
-    renderMarkdown(content) {
+    renderMarkdown(content: string) {
      const markdown = new MarkdownIt({
-        highlight: (str, lang) => {
+        highlight: (str, lang): string => {
          if (lang && Prism.languages[lang]) {
            try {
              return `<pre class="language-${lang}"><code>${this.highlightCode(str, lang)}</code></pre>`;
@@ -73,11 +74,18 @@ export default {
 </script>
 
 <template>
-  <div :class="this.message.role">
+  <div v-if="message.role !== 'tool'" :class="message.role">
     <div class="message-container">
-      <div class="content" v-html="renderMarkdown(this.message.content)"></div>
+      <div class="content" v-html="renderMarkdown(message.content)"></div>
     </div>
     <div v-if="message.error" class="error"><el-icon><Warning /></el-icon> {{ message.error }}</div>
   </div>
+  <div v-else-if="message.role === 'tool'">
+    <div class="tool-message-container">
+      <el-collapse>
+        <el-collapse-item title=""></el-collapse-item>
+      </el-collapse>
+    </div>
+    <div v-if="this.message.error" class="error"><el-icon><Warning /></el-icon> {{ this.message.error }}</div>
+  </div>
 
 </template>
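For orientation, the highlight wiring that this hunk type-annotates amounts to the following standalone sketch. It is illustrative only, not the component itself, and assumes the same markdown-it and Prism packages the file already imports:

    import MarkdownIt from 'markdown-it'
    import Prism from 'prismjs'

    // markdown-it delegates fenced code blocks to Prism when a grammar for
    // the requested language is loaded; returning '' lets markdown-it fall
    // back to its default escaping.
    const markdown = new MarkdownIt({
      highlight: (str: string, lang: string): string => {
        if (lang && Prism.languages[lang]) {
          return `<pre class="language-${lang}"><code>${Prism.highlight(str, Prism.languages[lang], lang)}</code></pre>`
        }
        return ''
      }
    })

    // e.g. markdown.render('```json\n{"hello": "world"}\n```') yields highlighted HTML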
@@ -1,6 +1,53 @@
 // Purpose: Define the message models for the chat stream
+import { ToolCall } from '@langchain/core/messages'
+
+
+// This is a schema for the raw message that we will get back from the Opey API,
+// we adapt it to our own schema in the OpeyMessage interface
+export interface RawOpeyMessage {
+  /**
+   * Role of the message.
+   * @example "human", "ai", "tool"
+   */
+  type: "human" | "ai" | "tool";
+
+  /**
+   * Content of the message.
+   * @example "Hello, world!"
+   */
+  content: string;
+
+  /**
+   * Tool calls in the message.
+   */
+  tool_calls: ToolCall[];
+
+  /**
+   * Whether this message is an approval request for a tool call.
+   */
+  tool_approval_request: boolean;
+
+  /**
+   * Tool call that this message is responding to.
+   * @example "call_Jja7J89XsjrOLA5r!MEOW!SL"
+   */
+  tool_call_id?: string;
+
+  /**
+   * Run ID of the message.
+   * @example "847c6285-8fc9-4560-a83f-4e6285809254"
+   */
+  run_id?: string;
+
+  /**
+   * Original LangChain message in serialized form.
+   */
+  original?: Record<string, any>;
+}
 
 export interface OpeyMessage {
   id: string; // i.e. UUID4
-  role: string;
+  role: "assistant" | "user" | "tool";
   content: string;
   error?: string;
 }
@@ -13,6 +60,12 @@ export interface AssistantMessage extends OpeyMessage {
   // Probably we will need some fields here for tool call/ tool call approval requests
 }
 
+export interface ToolMessage extends OpeyMessage {
+  pending: boolean;
+  awaitingApproval: boolean;
+  toolCall: ToolCall;
+}
+
 export interface ChatStreamInput {
   message: UserMessage;
 }
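The new comment says raw messages are adapted to the store's own schema. A hypothetical mapping of the raw LangChain-style roles onto the store's roles could look like this (a sketch under that assumption; adaptRole is not part of this commit):

    function adaptRole(raw: RawOpeyMessage['type']): OpeyMessage['role'] {
      // "human"/"ai"/"tool" from the Opey API -> UI roles used by the chat store
      switch (raw) {
        case 'human': return 'user'
        case 'ai': return 'assistant'
        case 'tool': return 'tool'
      }
    }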
@@ -86,16 +86,23 @@ export async function getOpeyJWT() {
 }
 
 export async function getobpConsent() {
-  await axios.post('/api/opey/consent').catch((error) => {
-    if (error.response) {
-      throw new Error(`getobpConsent returned an error: ${error.toJSON()}`);
-    } else {
-      throw new Error(`getobpConsent returned an error: ${error.message}`);
-    }
-  }).then((response) => {
-    console.log(response)
-    return response
-  });
+  // Get consent from the Opey API
+  try {
+    const consentResponse = await fetch('/api/opey/consent', {
+      method: 'POST',
+    })
+
+    if (!consentResponse.ok) {
+      throw new Error(`Failed to get Opey consent: ${consentResponse.statusText}`);
+    }
+
+    const consent = await consentResponse.json();
+    return consent
+
+  } catch (error) {
+    console.error('Error getting Opey consent:', error);
+    throw new Error(`${error instanceof Error ? error.message : String(error)}`);
+  }
 }
 
 export async function answerobpConsentChallenge(answerBody: any) {
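Call-site sketch (assuming callers simply await the function; the returned shape matches the obpConsentObject used elsewhere in this commit):

    try {
      const consent = await getobpConsent()
      console.log('Opey consent id:', consent.consent_id)
    } catch (e) {
      // network failures and non-2xx responses both surface here,
      // already wrapped with a readable message
    }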
@@ -1,159 +0,0 @@
-export interface OpeyMessage {
-  id: string; // i.e. UUID4
-  role: string;
-  content: string;
-  error?: string;
-}
-
-export interface UserMessage extends OpeyMessage {
-  isToolCallApproval: boolean;
-}
-
-export interface AssistantMessage extends OpeyMessage {
-  // Probably we will need some fields here for tool call/ tool call approval requests
-}
-
-export interface OpeyStreamContext {
-  currentAssistantMessage: AssistantMessage;
-  messages: OpeyMessage[];
-  status: string;
-}
-
-export interface obpConsentObject {
-  consent_id: string;
-}
-
-async function pushOrUpdateOpeyMessage(currentMessage: OpeyMessage, context: OpeyStreamContext): Promise<void> {
-  const existingMessage = context.messages.find(m => m.id === currentMessage.id);
-  if (existingMessage) {
-    // Update the existing message
-    existingMessage.content = currentMessage.content;
-
-  } else {
-    // Add the new message
-    context.messages.push(currentMessage);
-  }
-}
-/**
- * Process a stream from Opey API and update the message content
- * @param stream The ReadableStream from the fetch response
- * @param context The context object containing the message to update and status
- * @returns A promise that resolves when the stream is complete
- */
-export async function processOpeyStream(
-  stream: ReadableStream<Uint8Array>,
-  context: OpeyStreamContext
-): Promise<void> {
-  const reader = stream.getReader();
-  let decoder = new TextDecoder();
-
-  try {
-    while (true) {
-      const { done, value } = await reader.read();
-
-      if (done) {
-        console.log('Stream complete');
-        context.status = 'ready';
-        break;
-      }
-
-      const decodedValue = decoder.decode(value);
-      console.debug('Received:', decodedValue); //DEBUG
-
-      // Parse the SSE data format
-      const lines = decodedValue.split('\n');
-      for (const line of lines) {
-        if (line.startsWith('data: ') && line !== 'data: [DONE]') {
-          try {
-            const jsonStr = line.substring(6); // Remove 'data: '
-            const data = JSON.parse(jsonStr);
-
-            if (data.type === 'token' && data.content) {
-              // Append content to the current assistant message
-              context.currentAssistantMessage.content += data.content;
-              // Force Vue to detect the change
-              context.messages = [...context.messages];
-            }
-          } catch (e) {
-            throw new Error(`Error parsing JSON: ${e}`);
-          }
-        } else if (line === 'data: [DONE]') {
-          // Add the current assistant message to the messages list
-          // We need to check if the current assistant message is not already in the list, if it is simply update the existing message
-          await pushOrUpdateOpeyMessage(context.currentAssistantMessage, context);
-          // Reset the current assistant message
-          context.currentAssistantMessage = {
-            id: '',
-            role: 'assistant',
-            content: '',
-          };
-        }
-      }
-    }
-  } catch (error) {
-    console.error('Stream error:', error);
-    context.status = 'ready';
-    throw error
-  }
-
-}
-
-export async function sendOpeyMessage(
-  message: UserMessage,
-  threadId: string,
-  context: OpeyStreamContext
-): Promise<void> {
-  try {
-    const response = await fetch('/api/opey/stream', {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json'
-      },
-      body: JSON.stringify({
-        thread_id: threadId,
-        message: message.content,
-        is_tool_call_approval: message.isToolCallApproval
-      })
-    })
-
-    const stream = response.body;
-    if (!stream) {
-      throw new Error('No stream returned from API')
-    }
-
-    if (response.status !== 200) {
-      throw new Error(`Error sending Opey message: ${response.statusText}`);
-    }
-
-    await processOpeyStream(stream, context);
-  } catch (error) {
-    console.error('Error sending Opey message:', error);
-    context.status = 'ready';
-    throw new Error(`Error sending Opey message: ${error}`);
-  }
-
-}
-
-
-export async function getobpConsent(): Promise<obpConsentObject> {
-  // Get consent from the Opey API
-  try {
-    const consentResponse = await fetch('/api/opey/consent', {
-      method: 'POST',
-    })
-
-    if (!consentResponse.ok) {
-      throw new Error(`Failed to get Opey consent: ${consentResponse.statusText}`);
-    }
-
-    const consent = await consentResponse.json();
-    return consent
-
-  } catch (error) {
-    console.error('Error getting Opey consent:', error);
-    throw new Error(`${error instanceof Error ? error.message : String(error)}`);
-  }
-
-
-
-}
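(Everything deleted here survives in new homes: getobpConsent moves to common-functions.ts above, and processOpeyStream reappears below as the chat store's _processOpeyStream action, which is what the commit title describes.)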
@@ -25,9 +25,9 @@
  *
  */
 
-import type { OpeyMessage, ChatStreamInput } from '@/models/MessageModel'
+import type { OpeyMessage, ChatStreamInput, RawOpeyMessage, ToolMessage } from '@/models/MessageModel'
 import type { Chat } from '@/models/ChatModel'
-import { getobpConsent, processOpeyStream } from '@/obp/opey-functions'
+import { getobpConsent } from '@/obp/common-functions'
 import { defineStore } from 'pinia'
 import { v4 as uuidv4 } from 'uuid'
 
@@ -147,6 +147,10 @@ export const useChat = defineStore('chat', {
     },
 
     async stream(input: ChatStreamInput): Promise<void> {
+
+      // By this point, if we have not set the thread ID we should do so
+      this.getThreadId()
+
       // Add user message to chat
       this.addMessage(input.message)
 
@@ -188,7 +192,7 @@ export const useChat = defineStore('chat', {
         status: this.status
       };
 
-      await processOpeyStream(stream, context);
+      await this._processOpeyStream(stream);
       } catch (error) {
         console.error('Error sending Opey message:', error);
 
@@ -199,6 +203,78 @@ export const useChat = defineStore('chat', {
       this.status = 'ready';
 
       }
     },
 
+    async _processOpeyStream(stream: ReadableStream<Uint8Array>): Promise<void> {
+      const reader = stream.getReader();
+      let decoder = new TextDecoder();
+
+      try {
+        while (true) {
+          const { done, value } = await reader.read();
+
+          if (done) {
+            console.log('Stream complete');
+            this.status = 'ready';
+            break;
+          }
+
+          const decodedValue = decoder.decode(value);
+          console.debug('Received:', decodedValue); //DEBUG
+
+          // Parse the SSE data format
+          const lines = decodedValue.split('\n');
+          for (const line of lines) {
+            if (line.startsWith('data: ') && line !== 'data: [DONE]') {
+              try {
+                const jsonStr = line.substring(6); // Remove 'data: '
+                const data = JSON.parse(jsonStr);
+                const content: RawOpeyMessage = data.content;
+                // This is where we process different types of messages from Opey by their 'type' field
+                // Process pending tool calls
+                if (data.type === 'message') {
+                  console.log("Tool Calls: ", content)
+                  for (const toolCall of content.tool_calls) {
+
+                    const toolMessage: ToolMessage = {
+                      pending: true,
+                      id: uuidv4(),
+                      role: 'tool',
+                      content: '',
+                      awaitingApproval: false,
+                      toolCall: toolCall
+                    }
+
+                    this.addMessage(toolMessage)
+                  }
+                }
+                if (data.type === 'token' && data.content) {
+                  // Append content to the current assistant message
+                  this.currentAssistantMessage.content += data.content;
+                  // Force Vue to detect the change
+                  this.messages = [...this.messages];
+                }
+              } catch (e) {
+                throw new Error(`Error parsing JSON: ${e}`);
+              }
+            } else if (line === 'data: [DONE]') {
+              // Add the current assistant message to the messages list
+              // We need to check if the current assistant message is not already in the list, if it is simply update the existing message
+              await this.addMessage(this.currentAssistantMessage);
+              // Reset the current assistant message
+              this.currentAssistantMessage = {
+                id: '',
+                role: 'assistant',
+                content: '',
+              };
+            }
+          }
+        }
+      } catch (error) {
+        console.error('Stream error:', error);
+        this.status = 'ready';
+        throw error
+      }
+    }
+
   }
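Driving the migrated store from a component then reduces to something like this sketch (based on the call shape exercised by the tests further down):

    import { useChat } from '@/stores/chat'
    import { v4 as uuidv4 } from 'uuid'

    const chat = useChat()
    // stream() picks a thread ID if none is set, records the user message,
    // then consumes the SSE response via _processOpeyStream
    await chat.stream({
      message: { id: uuidv4(), role: 'user', content: 'Hello Opey', isToolCallApproval: false }
    })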
@@ -1,27 +1,15 @@
 import { mount } from '@vue/test-utils';
 import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
 import ChatWidget from '../components/ChatWidget.vue'
-import { OpeyStreamContext } from '@/obp/opey-functions';
+import { setActivePinia, createPinia } from 'pinia';
 
 describe('ChatWidget', () => {
-  let mockContext: OpeyStreamContext;
 
   beforeEach(() => {
 
-    mockContext = {
-      currentAssistantMessage: {
-        id: '123',
-        role: 'assistant',
-        content: '',
-      },
-      messages: [],
-      status: 'loading',
-    }
+    // Init Pinia Store
+    setActivePinia(createPinia())
 
     // create a mock stream
    const mockStream = new ReadableStream<Uint8Array>({
      start(controller) {
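With the hand-rolled context object gone, the widget's state now comes from the store, so a test only needs an active Pinia before mounting. A minimal sketch of that pattern (not part of this diff):

    import { mount } from '@vue/test-utils'
    import { createPinia, setActivePinia } from 'pinia'
    import ChatWidget from '../components/ChatWidget.vue'

    setActivePinia(createPinia())       // stores resolved in the component's setup use this instance
    const wrapper = mount(ChatWidget)   // alternatively: mount(ChatWidget, { global: { plugins: [createPinia()] } })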
@@ -1,5 +1,6 @@
 // Tesing the Pinia chat store in src/stores/chat.ts
 
 import type { OpeyMessage, ToolMessage } from '@/models/MessageModel'
+import { ToolCall } from '@langchain/core/messages'
 import { useChat } from '@/stores/chat'
 import { beforeEach, describe, it, expect, vi } from 'vitest'
 import { setActivePinia, createPinia } from 'pinia'
@@ -34,6 +35,18 @@ describe('Chat Store', () => {
     expect(newThreadId).toBe('1234')
   })
 
+  it('should set its own thread ID if stream is called without a thread ID set already', async () => {
+    const chatStore = useChat()
+    await chatStore.stream({message: {
+      content: 'Hello Opey',
+      role: 'user',
+      id: '123',
+      isToolCallApproval: false
+    }})
+    expect(chatStore.threadId).toBeDefined()
+    expect(chatStore.threadId).not.toBe('')
+  })
+
   it('should apply an error state to the assistant message on error', async () => {
     // mock the fetch function with a rejected promise
     global.fetch = vi.fn(() =>
@@ -91,4 +104,236 @@ describe('Chat Store', () => {
       },
     });
   })
 })
+
+describe('Chat Store _proccessOpeyStream', () => {
+  let mockStream: ReadableStream<Uint8Array>
+
+  let chatStore: ReturnType<typeof useChat>
+
+  beforeEach(() => {
+    // Set the active Pinia store
+    setActivePinia(createPinia())
+    chatStore = useChat()
+    // create a mock stream
+    mockStream = new ReadableStream<Uint8Array>({
+      start(controller) {
+        controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"Hello"}\n`));
+        controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":" world!"}\n`));
+        controller.close();
+      }
+    })
+  })
+
+  it('should update context with streamed content', async () => {
+
+    // Mock a ReadableStream
+    const mockAsisstantMessage = "Hi I'm Opey, your personal banking assistant. I'll certainly not take over the world, no, not at all!"
+
+    // Split the message into chunks, but reappend the whitespace (this is to simulate llm tokens)
+    const mockMessageChunks = mockAsisstantMessage.split(" ")
+    for (let i = 0; i < mockMessageChunks.length; i++) {
+      // Don't add whitespace to the last chunk
+      if (i === mockMessageChunks.length - 1 ) {
+        mockMessageChunks[i] = `${mockMessageChunks[i]}`
+        break
+      }
+      mockMessageChunks[i] = `${mockMessageChunks[i]} `
+    }
+
+    // Fake the token stream
+    const stream = new ReadableStream<Uint8Array>({
+      start(controller) {
+        for (let i = 0; i < mockMessageChunks.length; i++) {
+          controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"${mockMessageChunks[i]}"}\n`));
+        }
+        controller.close();
+      },
+    });
+
+    await chatStore._processOpeyStream(stream)
+    console.log(chatStore.currentAssistantMessage.content)
+    expect(chatStore.currentAssistantMessage.content).toBe(mockAsisstantMessage)
+  })
+
+  it('should throw an error when the stream is closed by the server', async () => {
+    const brokenStream = new ReadableStream<Uint8Array>({
+      start(controller) {
+        for (let i = 0; i < 10; i++) {
+          if (i === 5) {
+            controller.error(new Error('Stream closed by server'))
+            break;
+          }
+          controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"test"}\n`));
+        }
+
+      },
+    });
+
+    await expect(chatStore._processOpeyStream(brokenStream))
+      .rejects
+      .toThrow('Stream closed by server')
+  })
+
+  it('should be able a chunk with type: message and a tool call in the body', async () => {
+    // create a mock stream
+    const mockStream = new ReadableStream<Uint8Array>({
+      start(controller) {
+        controller.enqueue(new TextEncoder().encode(`data: {"type": "message", "content": {"type": "ai", "content": "", "tool_calls": [{"name": "retrieve_glossary", "args": {"question": "hre"}, "id": "call_XsmUpPIeS81l9MYpieBZtr4w", "type": "tool_call"}], "tool_approval_request": false, "tool_call_id": null, "run_id": "d0c2bcbe-62f7-464b-8564-bf9263939fe1", "original": {"type": "ai", "data": {"content": "", "additional_kwargs": {"tool_calls": [{"index": 0, "id": "call_XsmUpPIeS81l9MYpieBZtr4w", "function": {"arguments": "{\\"question\\":\\"hre\\"}", "name": "retrieve_glossary"}, "type": "function"}]}, "response_metadata": {"finish_reason": "tool_calls", "model_name": "gpt-4o-2024-08-06", "system_fingerprint": "fp_eb9dce56a8"}, "type": "ai", "name": null, "id": "run-5bb065b9-440d-4678-bbdb-cd6de94a78d3", "example": false, "tool_calls": [{"name": "retrieve_glossary", "args": {"question": "hre"}, "id": "call_XsmUpPIeS81l9MYpieBZtr4w", "type": "tool_call"}], "invalid_tool_calls": [], "usage_metadata": null}}}}\n`));
+        controller.close();
+      },
+    });
+
+    // mock the fetch function
+    global.fetch = vi.fn(() =>
+      Promise.resolve(new Response(mockStream, {
+        headers: { 'content-type': 'text/event-stream' },
+        status: 200,
+      }))
+    );
+
+    await chatStore._processOpeyStream(mockStream)
+
+    expect(chatStore.messages).toHaveLength(1)
+
+    const toolMessage: ToolMessage = chatStore.messages[0] as ToolMessage
+
+    expect(toolMessage.awaitingApproval).toBe(false)
+    expect(toolMessage.toolCall).toBeTypeOf('object')
+    expect(toolMessage.pending).toBe(true)
+    // Instead of checking instance directly, verify the object has the expected properties
+    expect(toolMessage.toolCall).toEqual(expect.objectContaining({
+      name: 'retrieve_glossary',
+      args: expect.objectContaining({
+        question: 'hre'
+      }),
+      id: 'call_XsmUpPIeS81l9MYpieBZtr4w',
+      type: 'tool_call'
+    }))
+    expect(toolMessage.content).toBe('')
+  })
+
+  it('should throw an error when the chunk is not valid json', async () => {
+    const invalidJsonStream = new ReadableStream<Uint8Array>({
+      start(controller) {
+        for (let i=0; i<10; i++) {
+          controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"test"}\n`));
+          if (i === 5) {
+            controller.enqueue(new TextEncoder().encode('data: "type":"token","content":"test"}\n'));
+          }
+        }
+        controller.close();
+
+      }
+    })
+
+    await expect(chatStore._processOpeyStream(invalidJsonStream))
+      .rejects
+      .toThrowError()
+  })
+
+  it("should set status to 'ready' when completed", async () => {
+    const stream = new ReadableStream<Uint8Array>({
+      start(controller) {
+        controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"test"}\n`));
+        controller.close();
+      }
+    })
+
+    await chatStore._processOpeyStream(stream)
+    expect(chatStore.status).toBe('ready')
+  })
+
+  it("should clear the placeholder assistant message, and update last assistant message when recieving the [DONE] signal", async () => {
+    // Mock a ReadableStream
+    const mockAsisstantMessage = "Hi I'm Opey, your personal banking assistant. I'll certainly not take over the world, no, not at all!"
+    // Split the message into chunks, but reappend the whitespace (this is to simulate llm tokens)
+    const mockMessageChunks = mockAsisstantMessage.split(" ")
+    for (let i = 0; i < mockMessageChunks.length; i++) {
+      // Don't add whitespace to the last chunk
+      if (i === mockMessageChunks.length - 1 ) {
+        mockMessageChunks[i] = `${mockMessageChunks[i]}`
+        break
+      }
+      mockMessageChunks[i] = `${mockMessageChunks[i]} `
+    }
+
+    // Fake the token stream
+    const stream = new ReadableStream<Uint8Array>({
+      start(controller) {
+        for (let i = 0; i < mockMessageChunks.length; i++) {
+          controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"${mockMessageChunks[i]}"}\n`));
+        }
+        controller.enqueue(new TextEncoder().encode(`data: [DONE]\n`));
+        controller.close();
+      },
+    });
+
+    // Replace current assistant message with a more unique one for our test
+    chatStore.currentAssistantMessage = {
+      id: '456',
+      role: 'assistant',
+      content: '',
+    }
+
+    // Push assistant message to the messages list as this is what we do in the ChatWidget to visualise token streaming
+    chatStore.addMessage(chatStore.currentAssistantMessage)
+
+    await chatStore._processOpeyStream(stream)
+    // assert that the current assistant 'placeholder' message was reset
+    expect(chatStore.currentAssistantMessage.content).toBe('')
+    // assert that the assistant message was added to the messages list
+    console.log(chatStore.messages)
+    expect(chatStore.messages).toContainEqual({
+      id: '456',
+      role: 'assistant',
+      content: mockAsisstantMessage,
+    })
+
+  })
+  it("should have a unique set of messages", async () => {
+    // mock the stream as above
+    // Mock a ReadableStream
+    const mockAsisstantMessage = "Hi I'm Opey, your personal banking assistant. I'll certainly not take over the world, no, not at all!"
+    // Split the message into chunks, but reappend the whitespace (this is to simulate llm tokens)
+    const mockMessageChunks = mockAsisstantMessage.split(" ")
+    for (let i = 0; i < mockMessageChunks.length; i++) {
+      // Don't add whitespace to the last chunk
+      if (i === mockMessageChunks.length - 1 ) {
+        mockMessageChunks[i] = `${mockMessageChunks[i]}`
+        break
+      }
+      mockMessageChunks[i] = `${mockMessageChunks[i]} `
+    }
+
+    // Fake the token stream
+    const stream = new ReadableStream<Uint8Array>({
+      start(controller) {
+        for (let i = 0; i < mockMessageChunks.length; i++) {
+          controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"${mockMessageChunks[i]}"}\n`));
+        }
+        controller.enqueue(new TextEncoder().encode(`data: [DONE]\n`));
+        controller.close();
+      },
+    });
+
+    // Replace current assistant message with a more unique one for our test
+    chatStore.currentAssistantMessage = {
+      id: '456',
+      role: 'assistant',
+      content: '',
+    }
+
+    // Push assistant message to the messages list as this is what we do in the ChatWidget to visualise token streaming
+    chatStore.addMessage(chatStore.currentAssistantMessage)
+
+    await chatStore._processOpeyStream(stream)
+
+    function hasUniqueValues(arr: OpeyMessage[]): boolean {
+      return arr.filter((value, index, self) => self.indexOf(value) === index).length === arr.length;
+    }
+    expect(hasUniqueValues(chatStore.messages)).toBe(true)
+  })
+})
24 src/test/common-functions.test.ts Normal file
@@ -0,0 +1,24 @@
+import { describe, vi, expect, it, beforeEach } from 'vitest'
+import { getobpConsent } from '@/obp/common-functions';
+
+describe('getobpConsent', () => {
+
+  beforeEach(() => {
+    global.fetch = vi.fn(() =>
+      Promise.resolve(new Response(JSON.stringify({consent_id: 1234}), {
+        headers: { 'content-type': 'application/json' },
+        status: 200,
+      }))
+    );
+  })
+
+  it('should call fetch', async () => {
+    await getobpConsent()
+    expect(global.fetch).toHaveBeenCalled()
+  })
+
+  it('should return a consent id', async () => {
+    const consentId = await getobpConsent()
+    expect(consentId).toStrictEqual({consent_id: 1234})
+  })
+})
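If one wanted to pin down the request shape as well, vitest mocks support argument assertions, e.g. (a sketch, not in the diff; the expected arguments mirror the fetch call inside getobpConsent):

    expect(global.fetch).toHaveBeenCalledWith('/api/opey/consent', { method: 'POST' })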
@@ -1,271 +0,0 @@
-import * as OpeyModule from '@/obp/opey-functions';
-import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
-
-describe('processOpeyStream', async () => {
-  let mockContext: OpeyModule.OpeyStreamContext;
-
-  beforeEach(() => {
-    // Reset the mock context before each test
-    mockContext = {
-      currentAssistantMessage: {
-        id: '123',
-        role: 'assistant',
-        content: '',
-      },
-      messages: [],
-      status: 'loading',
-    }
-  })
-  it('should update context with streamed content', async () => {
-    // Mock a ReadableStream
-    const mockAsisstantMessage = "Hi I'm Opey, your personal banking assistant. I'll certainly not take over the world, no, not at all!"
-
-    // Split the message into chunks, but reappend the whitespace (this is to simulate llm tokens)
-    const mockMessageChunks = mockAsisstantMessage.split(" ")
-    for (let i = 0; i < mockMessageChunks.length; i++) {
-      // Don't add whitespace to the last chunk
-      if (i === mockMessageChunks.length - 1 ) {
-        mockMessageChunks[i] = `${mockMessageChunks[i]}`
-        break
-      }
-      mockMessageChunks[i] = `${mockMessageChunks[i]} `
-    }
-
-    // Fake the token stream
-    const stream = new ReadableStream<Uint8Array>({
-      start(controller) {
-        for (let i = 0; i < mockMessageChunks.length; i++) {
-          controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"${mockMessageChunks[i]}"}\n`));
-        }
-        controller.close();
-      },
-    });
-
-    await OpeyModule.processOpeyStream(stream, mockContext)
-    console.log(mockContext.currentAssistantMessage.content)
-    expect(mockContext.currentAssistantMessage.content).toBe(mockAsisstantMessage)
-  })
-
-  it('should throw an error when the stream is closed by the server', async () => {
-    const brokenStream = new ReadableStream<Uint8Array>({
-      start(controller) {
-        for (let i = 0; i < 10; i++) {
-          if (i === 5) {
-            controller.error(new Error('Stream closed by server'))
-            break;
-          }
-          controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"test"}\n`));
-        }
-
-      },
-    });
-
-    await expect(OpeyModule.processOpeyStream(brokenStream, mockContext))
-      .rejects
-      .toThrow('Stream closed by server')
-  })
-
-  it('should be able to handle empty content', async () => {
-
-  })
-
-  it('should throw an error when the chunk is not valid json', async () => {
-    const invalidJsonStream = new ReadableStream<Uint8Array>({
-      start(controller) {
-        for (let i=0; i<10; i++) {
-          controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"test"}\n`));
-          if (i === 5) {
-            controller.enqueue(new TextEncoder().encode('data: "type":"token","content":"test"}\n'));
-          }
-        }
-        controller.close();
-
-      }
-    })
-
-    await expect(OpeyModule.processOpeyStream(invalidJsonStream, mockContext))
-      .rejects
-      .toThrowError()
-  })
-
-  it("should set status to 'ready' when completed", async () => {
-    const stream = new ReadableStream<Uint8Array>({
-      start(controller) {
-        controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"test"}\n`));
-        controller.close();
-      }
-    })
-
-    await OpeyModule.processOpeyStream(stream, mockContext)
-    expect(mockContext.status).toBe('ready')
-  })
-
-  it("should clear the placeholder assistant message, and update last assistant message when recieving the [DONE] signal", async () => {
-    // Mock a ReadableStream
-    const mockAsisstantMessage = "Hi I'm Opey, your personal banking assistant. I'll certainly not take over the world, no, not at all!"
-    // Split the message into chunks, but reappend the whitespace (this is to simulate llm tokens)
-    const mockMessageChunks = mockAsisstantMessage.split(" ")
-    for (let i = 0; i < mockMessageChunks.length; i++) {
-      // Don't add whitespace to the last chunk
-      if (i === mockMessageChunks.length - 1 ) {
-        mockMessageChunks[i] = `${mockMessageChunks[i]}`
-        break
-      }
-      mockMessageChunks[i] = `${mockMessageChunks[i]} `
-    }
-
-    // Fake the token stream
-    const stream = new ReadableStream<Uint8Array>({
-      start(controller) {
-        for (let i = 0; i < mockMessageChunks.length; i++) {
-          controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"${mockMessageChunks[i]}"}\n`));
-        }
-        controller.enqueue(new TextEncoder().encode(`data: [DONE]\n`));
-        controller.close();
-      },
-    });
-
-    // Replace current assistant message with a more unique one for our test
-    mockContext.currentAssistantMessage = {
-      id: '456',
-      role: 'assistant',
-      content: '',
-    }
-
-    // Push assistant message to the messages list as this is what we do in the ChatWidget to visualise token streaming
-    mockContext.messages.push(mockContext.currentAssistantMessage)
-
-    await OpeyModule.processOpeyStream(stream, mockContext)
-    // assert that the current assistant 'placeholder' message was reset
-    expect(mockContext.currentAssistantMessage.content).toBe('')
-    // assert that the assistant message was added to the messages list
-    console.log(mockContext.messages)
-    expect(mockContext.messages).toContainEqual({
-      id: '456',
-      role: 'assistant',
-      content: mockAsisstantMessage,
-    })
-
-
-  })
-  it("should have a unique set of messages", async () => {
-    // mock the stream as above
-    // Mock a ReadableStream
-    const mockAsisstantMessage = "Hi I'm Opey, your personal banking assistant. I'll certainly not take over the world, no, not at all!"
-    // Split the message into chunks, but reappend the whitespace (this is to simulate llm tokens)
-    const mockMessageChunks = mockAsisstantMessage.split(" ")
-    for (let i = 0; i < mockMessageChunks.length; i++) {
-      // Don't add whitespace to the last chunk
-      if (i === mockMessageChunks.length - 1 ) {
-        mockMessageChunks[i] = `${mockMessageChunks[i]}`
-        break
-      }
-      mockMessageChunks[i] = `${mockMessageChunks[i]} `
-    }
-
-    // Fake the token stream
-    const stream = new ReadableStream<Uint8Array>({
-      start(controller) {
-        for (let i = 0; i < mockMessageChunks.length; i++) {
-          controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"${mockMessageChunks[i]}"}\n`));
-        }
-        controller.enqueue(new TextEncoder().encode(`data: [DONE]\n`));
-        controller.close();
-      },
-    });
-
-    // Replace current assistant message with a more unique one for our test
-    mockContext.currentAssistantMessage = {
-      id: '456',
-      role: 'assistant',
-      content: '',
-    }
-
-    // Push assistant message to the messages list as this is what we do in the ChatWidget to visualise token streaming
-    mockContext.messages.push(mockContext.currentAssistantMessage)
-
-    await OpeyModule.processOpeyStream(stream, mockContext)
-
-    function hasUniqueValues(arr: OpeyModule.OpeyMessage[]): boolean {
-      return arr.filter((value, index, self) => self.indexOf(value) === index).length === arr.length;
-    }
-    expect(hasUniqueValues(mockContext.messages)).toBe(true)
-  })
-
-})
-
-describe('sendOpeyMessage', () => {
-  let mockContext: OpeyModule.OpeyStreamContext;
-  let testUserMessage: OpeyModule.UserMessage;
-
-  beforeEach(() => {
-    mockContext = {
-      currentAssistantMessage: {
-        id: '123',
-        role: 'assistant',
-        content: '',
-      },
-      messages: [],
-      status: 'loading',
-    }
-
-    // create a mock stream
-    const mockStream = new ReadableStream<Uint8Array>({
-      start(controller) {
-        controller.enqueue(new TextEncoder().encode(`data: {"type":"token","content":"test"}\n`));
-        controller.close();
-      },
-    });
-
-    // mock the fetch function
-    global.fetch = vi.fn(() =>
-      Promise.resolve(new Response(mockStream, {
-        headers: { 'content-type': 'text/event-stream' },
-        status: 200,
-      }))
-    );
-
-    testUserMessage = {
-      id: '123',
-      role: 'user',
-      content: 'test message',
-      isToolCallApproval: false,
-    }
-  })
-  afterEach(() => {
-    vi.clearAllMocks()
-  })
-  it('should call fetch', async () => {
-    await OpeyModule.sendOpeyMessage(testUserMessage, '123', mockContext)
-
-    expect(global.fetch).toHaveBeenCalled()
-  })
-  it("should push the 'ready' status to the context after success", async () => {
-
-    await OpeyModule.sendOpeyMessage(testUserMessage, '123', mockContext)
-
-    expect(mockContext.status).toBe('ready')
-  })
-})
-
-describe('getobpConsent', () => {
-
-  beforeEach(() => {
-    global.fetch = vi.fn(() =>
-      Promise.resolve(new Response(JSON.stringify({consent_id: 1234}), {
-        headers: { 'content-type': 'application/json' },
-        status: 200,
-      }))
-    );
-  })
-
-  it('should call fetch', async () => {
-    await OpeyModule.getobpConsent()
-    expect(global.fetch).toHaveBeenCalled()
-  })
-
-  it('should return a consent id', async () => {
-    const consentId = await OpeyModule.getobpConsent()
-    expect(consentId).toStrictEqual({consent_id: 1234})
-  })
-})
@@ -1,6 +1,6 @@
 {
   "extends": "@vue/tsconfig/tsconfig.web.json",
-  "include": ["env.d.ts", "src/**/*", "src/**/*.vue"],
+  "include": ["env.d.ts", "src/**/*", "src/**/*.vue", "test/integration.test.ts"],
   "exclude": ["src/**/__tests__/*"],
   "compilerOptions": {
     "esModuleInterop": true,