From 5eb070a62fafa1bb21438fe812d844603a140c56 Mon Sep 17 00:00:00 2001
From: heimoshuiyu
Date: Wed, 15 Mar 2023 02:33:49 +0800
Subject: [PATCH] support both fetch and stream mode

---
 src/app.tsx    | 77 ++++++++++++++++++++++++++++++++++++--------------
 src/chatgpt.ts | 13 ++++-----
 2 files changed, 62 insertions(+), 28 deletions(-)

diff --git a/src/app.tsx b/src/app.tsx
index 1e10ba4..ef2cd3d 100644
--- a/src/app.tsx
+++ b/src/app.tsx
@@ -13,6 +13,7 @@ export interface ChatStore {
   maxTokens: number;
   apiKey: string;
   apiEndpoint: string;
+  streamMode: boolean;
 }
 
 const defaultAPIKEY = () => {
@@ -33,11 +34,20 @@ const defaultAPIEndpoint = () => {
   return sys;
 };
 
+const defaultMode = () => {
+  const queryParameters = new URLSearchParams(window.location.search);
+  const sys = queryParameters.get("mode");
+  if (sys === "stream") return true;
+  if (sys === "fetch") return false;
+  return undefined;
+};
+
 const _defaultAPIEndpoint = "https://api.openai.com/v1/chat/completions";
 export const newChatStore = (
   apiKey = "",
   systemMessageContent = "你是一个猫娘,你要模仿猫娘的语气说话",
-  apiEndpoint = _defaultAPIEndpoint
+  apiEndpoint = _defaultAPIEndpoint,
+  streamMode = true
 ): ChatStore => {
   return {
     systemMessageContent: defaultSysMessage() || systemMessageContent,
@@ -48,6 +58,7 @@ export const newChatStore = (
     maxTokens: 4096,
     apiKey: defaultAPIKEY() || apiKey,
     apiEndpoint: defaultAPIEndpoint() || apiEndpoint,
+    streamMode: defaultMode() ?? streamMode,
   };
 };
 
@@ -83,12 +94,7 @@ export function App() {
 
   const client = new ChatGPT(chatStore.apiKey);
 
-  const _complete = async () => {
-    // manually copy status from chatStore to client
-    client.apiEndpoint = chatStore.apiEndpoint;
-    client.sysMessageContent = chatStore.systemMessageContent;
-    client.messages = chatStore.history.slice(chatStore.postBeginIndex);
-
+  const _completeWithStreamMode = async () => {
     // call api, return reponse text
     const response = await client.completeWithSteam();
     console.log("response", response);
@@ -149,25 +155,39 @@ export function App() {
         setShowGenerating(false);
       },
     });
+  };
 
-    // manually copy status from client to chatStore
-    chatStore.maxTokens = client.max_tokens;
-    chatStore.tokenMargin = client.tokens_margin;
-    chatStore.totalTokens = client.total_tokens;
-    // when total token > max token - margin token:
-    // ChatGPT will "forgot" some historical message
-    // so client.message.length will be less than chatStore.history.length
-    chatStore.postBeginIndex =
-      chatStore.history.length - client.messages.length;
-    console.log("postBeginIndex", chatStore.postBeginIndex);
-    setChatStore({ ...chatStore });
+  const _completeWithFetchMode = async () => {
+    // call the API and return the response text
+    const response = await client.complete();
+    chatStore.history.push({ role: "assistant", content: response });
+    setShowGenerating(false);
   };
 
   // wrap the actuall complete api
   const complete = async () => {
+    // manually copy status from chatStore to client
+    client.apiEndpoint = chatStore.apiEndpoint;
+    client.sysMessageContent = chatStore.systemMessageContent;
+    client.messages = chatStore.history.slice(chatStore.postBeginIndex);
    try {
       setShowGenerating(true);
-      await _complete();
+      if (chatStore.streamMode) {
+        await _completeWithStreamMode();
+      } else {
+        await _completeWithFetchMode();
+      }
+      // manually copy status from client to chatStore
+      chatStore.maxTokens = client.max_tokens;
+      chatStore.tokenMargin = client.tokens_margin;
+      chatStore.totalTokens = client.total_tokens;
+      // when total tokens > max tokens - margin tokens:
+      // ChatGPT will "forget" some historical messages,
+      // so client.messages.length will be less than chatStore.history.length
+      chatStore.postBeginIndex =
+        chatStore.history.length - client.messages.length;
+      console.log("postBeginIndex", chatStore.postBeginIndex);
+      setChatStore({ ...chatStore });
     } catch (error) {
       alert(error);
     }
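
Note on the stream path above: completeWithSteam() resolves to the raw fetch Response (the request is sent with stream: true, see the src/chatgpt.ts hunks below), so _completeWithStreamMode has to consume the body as a server-sent-events stream; the reader itself sits in unchanged lines elided from the hunk. A minimal sketch of such a consumer, with illustrative names rather than the project's actual reader:

    // Hypothetical SSE consumer for the stream-mode Response; illustrative
    // only, the project's real reader lives in the unchanged lines above.
    async function readCompletionStream(
      response: Response,
      onDelta: (text: string) => void
    ): Promise<void> {
      const reader = response.body!.getReader();
      const decoder = new TextDecoder();
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        // Each chunk carries one or more "data: {...}" SSE lines; a robust
        // reader must also buffer lines that are split across chunks.
        for (const line of decoder.decode(value).split("\n")) {
          const payload = line.replace(/^data: /, "").trim();
          if (!payload || payload === "[DONE]") continue;
          const delta = JSON.parse(payload).choices[0]?.delta?.content;
          if (delta) onDelta(delta);
        }
      }
    }
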
@@ -205,7 +225,8 @@ export function App() {
           newChatStore(
             allChatStore[selectedChatIndex].apiKey,
             allChatStore[selectedChatIndex].systemMessageContent,
-            allChatStore[selectedChatIndex].apiEndpoint
+            allChatStore[selectedChatIndex].apiEndpoint,
+            allChatStore[selectedChatIndex].streamMode
           )
         );
         setAllChatStore([...allChatStore]);
@@ -240,13 +261,15 @@ export function App() {
     const oldSystemMessageContent =
       allChatStore[selectedChatIndex].systemMessageContent;
     const oldAPIEndpoint = allChatStore[selectedChatIndex].apiEndpoint;
+    const oldMode = allChatStore[selectedChatIndex].streamMode;
     allChatStore.splice(selectedChatIndex, 1);
     if (allChatStore.length === 0) {
       allChatStore.push(
         newChatStore(
           defaultAPIKEY() || oldAPIkey,
           defaultSysMessage() || oldSystemMessageContent,
-          defaultAPIEndpoint() || oldAPIEndpoint
+          defaultAPIEndpoint() || oldAPIEndpoint,
+          defaultMode() ?? oldMode
         )
       );
       setSelectedChatIndex(0);
@@ -291,6 +314,18 @@ export function App() {
         >
           ENDPOINT
         </button>
+        <button
+          onClick={() => {
+            chatStore.streamMode = !chatStore.streamMode;
+            setChatStore({ ...chatStore });
+          }}
+        >
+          {chatStore.streamMode ? "STREAM" : "FETCH"}
+        </button>
         Total: {chatStore.totalTokens}{" "}
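
The stream/fetch toggle markup in the last hunk above reached this copy with its JSX tags stripped; the button shown is a minimal reconstruction, pinned down only by the surrounding code: it flips chatStore.streamMode, saves the store, and labels itself with the active mode.

Separately, note that the mode default read from the URL is deliberately tri-state: ?mode=stream maps to true, ?mode=fetch to false, and a missing parameter to undefined. The fallbacks therefore need the nullish-coalescing operator; || would treat an explicit false (fetch mode) the same as a missing value, which is why defaultMode() ?? streamMode and defaultMode() ?? oldMode are used above. A self-contained illustration, with the helper inlined so the snippet runs on its own:

    // Tri-state mode default, mirroring defaultMode() in the patch.
    const modeFromQuery = (search: string): boolean | undefined => {
      const sys = new URLSearchParams(search).get("mode");
      if (sys === "stream") return true;
      if (sys === "fetch") return false;
      return undefined;
    };

    console.log(modeFromQuery("?mode=fetch") ?? true); // false: explicit fetch wins
    console.log(modeFromQuery("?mode=fetch") || true); // true: || drops the explicit false
    console.log(modeFromQuery("") ?? true);            // true: no parameter, keep default
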
diff --git a/src/chatgpt.ts b/src/chatgpt.ts
index 6b2a18e..8673f50 100644
--- a/src/chatgpt.ts
+++ b/src/chatgpt.ts
@@ -39,7 +39,7 @@ class Chat {
     this.apiEndpoint = apiEndPoint;
   }
 
-  _fetch() {
+  _fetch(stream = false) {
     return fetch(this.apiEndpoint, {
       method: "POST",
       headers: {
@@ -52,7 +52,7 @@ class Chat {
         { role: "system", content: this.sysMessageContent },
         ...this.messages,
       ],
-      stream: true,
+      stream,
     }),
   });
 }
@@ -102,11 +102,10 @@ class Chat {
   }
 
   completeWithSteam() {
-    this.total_tokens =
-      this.messages
-        .map((msg) => this.calculate_token_length(msg.content) + 20)
-        .reduce((a, v) => a + v);
-    return this._fetch();
+    this.total_tokens = this.messages
+      .map((msg) => this.calculate_token_length(msg.content) + 20)
+      .reduce((a, v) => a + v);
+    return this._fetch(true);
   }
 
   // https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
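
With the boolean threaded through _fetch, one request builder serves both modes: completeWithSteam() passes true and hands back the streaming Response, while fetch mode can ask for a single JSON body. The complete() implementation is outside this diff's hunks, so the following is only a sketch of what the non-stream path plausibly looks like, with the client shape narrowed to the one assumed method:

    // Sketch of fetch (non-stream) mode; complete() itself is not shown in
    // this patch, so this is an assumption built on _fetch(stream) above.
    async function completeOnce(chat: {
      _fetch: (stream?: boolean) => Promise<Response>;
    }): Promise<string> {
      const resp = await chat._fetch(false); // stream: false, one JSON body
      const data = await resp.json();
      // Non-stream chat completions return the whole message at once, under
      // choices[0].message.content, instead of streamed delta fragments.
      return data.choices[0].message.content;
    }

One caveat in the reshuffled token estimate: reduce((a, v) => a + v) with no initial value throws a TypeError on an empty messages array, so seeding the reduce with 0 would make completeWithSteam() safe to call before any message exists.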