support both fetch and stream mode

commit 5eb070a62f
parent a68e4b9dbd
2023-03-15 02:33:49 +08:00
2 changed files with 62 additions and 28 deletions

@@ -13,6 +13,7 @@ export interface ChatStore {
   maxTokens: number;
   apiKey: string;
   apiEndpoint: string;
+  streamMode: boolean;
 }
 
 const defaultAPIKEY = () => {
@@ -33,11 +34,20 @@ const defaultAPIEndpoint = () => {
   return sys;
 };
 
+const defauleMode = () => {
+  const queryParameters = new URLSearchParams(window.location.search);
+  const sys = queryParameters.get("mode");
+  if (sys === "stream") return true;
+  if (sys === "fetch") return false;
+  return undefined;
+};
+
 const _defaultAPIEndpoint = "https://api.openai.com/v1/chat/completions";
 export const newChatStore = (
   apiKey = "",
   systemMessageContent = "You are a catgirl. Imitate how a catgirl speaks.",
-  apiEndpoint = _defaultAPIEndpoint
+  apiEndpoint = _defaultAPIEndpoint,
+  streamMode = true
 ): ChatStore => {
   return {
     systemMessageContent: defaultSysMessage() || systemMessageContent,
@@ -48,6 +58,7 @@ export const newChatStore = (
     maxTokens: 4096,
     apiKey: defaultAPIKEY() || apiKey,
     apiEndpoint: defaultAPIEndpoint() || apiEndpoint,
+    streamMode: defauleMode() ?? streamMode,
   };
 };
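
A note on the precedence the hunks above establish: the URL's ?mode= query parameter (read by defauleMode, which returns undefined when the parameter is absent or unrecognized) overrides the per-chat streamMode, which overrides the built-in default of true. A minimal standalone sketch of that chain, with an illustrative query string that is not part of the commit:

// Hypothetical query string, used only to exercise the resolution logic.
const params = new URLSearchParams("?mode=fetch");
const raw = params.get("mode");
const fromQuery = raw === "stream" ? true : raw === "fetch" ? false : undefined;
// `??` (not `||`) keeps an explicit `false` from the query string alive.
const streamMode = fromQuery ?? true; // -> false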
@@ -83,12 +94,7 @@ export function App() {
   const client = new ChatGPT(chatStore.apiKey);
 
-  const _complete = async () => {
-    // manually copy status from chatStore to client
-    client.apiEndpoint = chatStore.apiEndpoint;
-    client.sysMessageContent = chatStore.systemMessageContent;
-    client.messages = chatStore.history.slice(chatStore.postBeginIndex);
-
+  const _completeWithStreamMode = async () => {
     // call api, return response text
     const response = await client.completeWithSteam();
     console.log("response", response);
@@ -149,7 +155,28 @@ export function App() {
         setShowGenerating(false);
       },
     });
+  };
+
+  const _completeWithFetchMode = async () => {
+    // call api, return response text
+    const response = await client.complete();
+    chatStore.history.push({ role: "assistant", content: response });
+    setShowGenerating(false);
+  };
+
+  // wrap the actual complete api
+  const complete = async () => {
+    // manually copy status from chatStore to client
+    client.apiEndpoint = chatStore.apiEndpoint;
+    client.sysMessageContent = chatStore.systemMessageContent;
+    client.messages = chatStore.history.slice(chatStore.postBeginIndex);
+    try {
+      setShowGenerating(true);
+      if (chatStore.streamMode) {
+        await _completeWithStreamMode();
+      } else {
+        await _completeWithFetchMode();
+      }
     // manually copy status from client to chatStore
     chatStore.maxTokens = client.max_tokens;
     chatStore.tokenMargin = client.tokens_margin;
@@ -161,13 +188,6 @@ export function App() {
       chatStore.history.length - client.messages.length;
     console.log("postBeginIndex", chatStore.postBeginIndex);
     setChatStore({ ...chatStore });
-  };
-
-  // wrap the actuall complete api
-  const complete = async () => {
-    try {
-      setShowGenerating(true);
-      await _complete();
   } catch (error) {
     alert(error);
   }
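
The streaming branch's body is elided between the hunks above; what the diff does show is that completeWithSteam() resolves to the raw fetch Response, so the component has to consume the SSE body itself. A sketch of what such a reader might look like, assuming the standard chat-completions event format (illustrative, not the commit's code):

// Read "data: {...}" SSE lines from the Response body and concatenate the
// delta fragments. A production parser must also buffer lines that are
// split across network chunks; this sketch skips that for brevity.
async function readStream(response: Response): Promise<string> {
  const reader = response.body!.getReader();
  const decoder = new TextDecoder();
  let text = "";
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    for (const line of decoder.decode(value, { stream: true }).split("\n")) {
      if (!line.startsWith("data: ") || line.includes("[DONE]")) continue;
      const delta = JSON.parse(line.slice(6)).choices[0]?.delta?.content;
      if (delta) text += delta; // append each fragment as it arrives
    }
  }
  return text;
}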
@@ -205,7 +225,8 @@ export function App() {
           newChatStore(
             allChatStore[selectedChatIndex].apiKey,
             allChatStore[selectedChatIndex].systemMessageContent,
-            allChatStore[selectedChatIndex].apiEndpoint
+            allChatStore[selectedChatIndex].apiEndpoint,
+            allChatStore[selectedChatIndex].streamMode
           )
         );
         setAllChatStore([...allChatStore]);
@@ -240,13 +261,15 @@ export function App() {
     const oldSystemMessageContent =
       allChatStore[selectedChatIndex].systemMessageContent;
     const oldAPIEndpoint = allChatStore[selectedChatIndex].apiEndpoint;
+    const oldMode = allChatStore[selectedChatIndex].streamMode;
     allChatStore.splice(selectedChatIndex, 1);
     if (allChatStore.length === 0) {
       allChatStore.push(
         newChatStore(
           defaultAPIKEY() || oldAPIkey,
           defaultSysMessage() || oldSystemMessageContent,
-          defaultAPIEndpoint() || oldAPIEndpoint
+          defaultAPIEndpoint() || oldAPIEndpoint,
+          defauleMode() ?? oldMode
         )
       );
       setSelectedChatIndex(0);
@@ -291,6 +314,18 @@ export function App() {
           >
             ENDPOINT
           </button>
+          <button
+            className="underline"
+            onClick={() => {
+              const choice = confirm(
+                "FETCH mode requests the complete reply in a single call, so the token count is accurate.\nSTREAM mode shows the reply as it is generated, but can only estimate token counts, so it may trim too much or too little history when tokens run high.\nUse STREAM mode?"
+              );
+              chatStore.streamMode = !!choice;
+              setChatStore({ ...chatStore });
+            }}
+          >
+            {chatStore.streamMode ? "STREAM" : "FETCH"}
+          </button>
         </div>
         <div className="text-xs">
           <span>Total: {chatStore.totalTokens}</span>{" "}

@@ -39,7 +39,7 @@ class Chat {
     this.apiEndpoint = apiEndPoint;
   }
 
-  _fetch() {
+  _fetch(stream = false) {
     return fetch(this.apiEndpoint, {
       method: "POST",
      headers: {
@@ -52,7 +52,7 @@ class Chat {
         { role: "system", content: this.sysMessageContent },
         ...this.messages,
       ],
-      stream: true,
+      stream,
     }),
   });
 }
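
The stream flag is the only request-level difference between the two modes; what changes is the shape of the response. Roughly, per the OpenAI API's documented behavior at the time (an illustration, not part of this diff):

// stream: false -> one JSON document, including an exact usage count:
//   { "choices": [{ "message": { "content": "..." } }],
//     "usage": { "total_tokens": 42 } }
// stream: true  -> Server-Sent Events, one delta per chunk, no usage field:
//   data: {"choices":[{"delta":{"content":"Hel"}}]}
//   data: {"choices":[{"delta":{"content":"lo"}}]}
//   data: [DONE]

The absent usage field is why the confirm dialog above warns that STREAM mode can only estimate token counts.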
@@ -102,11 +102,10 @@ class Chat {
   }
 
   completeWithSteam() {
-    this.total_tokens =
-      this.messages
+    this.total_tokens = this.messages
       .map((msg) => this.calculate_token_length(msg.content) + 20)
       .reduce((a, v) => a + v);
-    return this._fetch();
+    return this._fetch(true);
   }
 
   // https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
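
Because a streamed response reports no usage, completeWithSteam estimates up front: each message costs calculate_token_length(content) plus a flat 20 tokens for role and formatting overhead. A sketch of that shape, with a character-based heuristic standing in for calculate_token_length (whose body is outside this diff):

// Rough rule of thumb: ~1 token per 4 characters of English text.
const roughTokenLength = (s: string) => Math.ceil(s.length / 4);

function estimateTotalTokens(
  messages: { role: string; content: string }[]
): number {
  return messages
    .map((msg) => roughTokenLength(msg.content) + 20) // +20 per-message overhead
    .reduce((a, v) => a + v, 0); // seeding with 0 keeps an empty history safe
}

Note that the reduce in the hunk above has no initial value, so it throws on an empty messages array; seeding with 0 sidesteps that edge case.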