use stream mode depending on content-type

2023-03-24 13:14:34 +08:00
parent 0148465e34
commit 99d3c69647
2 changed files with 37 additions and 28 deletions
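
Instead of branching on chatStore.streamMode before the request is made, the chat box now always goes through client._fetch() and picks the handler from the response's Content-Type header: text/event-stream responses are read as a stream, application/json responses go through the new processFetchResponse() path, and any other content type raises an error. The response type previously inlined in fetch() is factored out into an exported FetchResponse interface so both files can share it.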

View File

@@ -1,6 +1,6 @@
 import { useState } from "preact/hooks";
 import type { ChatStore } from "./app";
-import ChatGPT, { ChunkMessage } from "./chatgpt";
+import ChatGPT, { ChunkMessage, FetchResponse } from "./chatgpt";
 import Message from "./message";
 import Settings from "./settings";
@@ -18,9 +18,8 @@ export default function ChatBOX(props: {
   const client = new ChatGPT(chatStore.apiKey);
-  const _completeWithStreamMode = async () => {
-    // call api, return reponse text
-    const response = await client.completeWithSteam();
+  const _completeWithStreamMode = async (response: Response) => {
     console.log("response", response);
     const reader = response.body?.getReader();
     const allChunkMessage: string[] = [];
@@ -83,10 +82,10 @@ export default function ChatBOX(props: {
     });
   };
-  const _completeWithFetchMode = async () => {
-    // call api, return reponse text
-    const response = await client.complete();
-    chatStore.history.push({ role: "assistant", content: response });
+  const _completeWithFetchMode = async (response: Response) => {
+    const data = (await response.json()) as FetchResponse;
+    const content = client.processFetchResponse(data);
+    chatStore.history.push({ role: "assistant", content });
     setShowGenerating(false);
   };
@@ -98,10 +97,14 @@ export default function ChatBOX(props: {
     client.messages = chatStore.history.slice(chatStore.postBeginIndex);
     try {
       setShowGenerating(true);
-      if (chatStore.streamMode) {
-        await _completeWithStreamMode();
+      const response = await client._fetch(chatStore.streamMode);
+      const contentType = response.headers.get("content-type");
+      if (contentType === "text/event-stream") {
+        await _completeWithStreamMode(response);
+      } else if (contentType === "application/json") {
+        await _completeWithFetchMode(response);
       } else {
-        await _completeWithFetchMode();
+        throw `unknown response content type ${contentType}`;
       }
       // manually copy status from client to chatStore
       chatStore.maxTokens = client.max_tokens;
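
The new control flow in this hunk reduces to a small, reusable pattern. Below is a minimal sketch of it in isolation; dispatchByContentType, handleStream, and handleJson are hypothetical names standing in for the component's _completeWithStreamMode and _completeWithFetchMode, not code from this repository:

async function dispatchByContentType(
  response: Response,
  handleStream: (r: Response) => Promise<void>,
  handleJson: (r: Response) => Promise<void>
): Promise<void> {
  // Streaming replies arrive as server-sent events;
  // one-shot completions arrive as a single JSON document.
  const contentType = response.headers.get("content-type");
  if (contentType === "text/event-stream") {
    await handleStream(response);
  } else if (contentType === "application/json") {
    await handleJson(response);
  } else {
    throw new Error(`unknown response content type ${contentType}`);
  }
}

One caveat of the strict equality used here: a server that appends parameters, e.g. application/json; charset=utf-8, would fall into the error branch, so contentType?.startsWith("application/json") would be a more forgiving check.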

View File

@@ -9,6 +9,23 @@ export interface ChunkMessage {
 }[];
 }
+export interface FetchResponse {
+  id: string;
+  object: string;
+  created: number;
+  model: string;
+  usage: {
+    prompt_tokens: number | undefined;
+    completion_tokens: number | undefined;
+    total_tokens: number | undefined;
+  };
+  choices: {
+    message: Message | undefined;
+    finish_reason: "stop" | "length";
+    index: number | undefined;
+  }[];
+}
+
 class Chat {
   OPENAI_API_KEY: string;
   messages: Message[];
@@ -57,22 +74,7 @@ class Chat {
     });
   }
-  async fetch(): Promise<{
-    id: string;
-    object: string;
-    created: number;
-    model: string;
-    usage: {
-      prompt_tokens: number | undefined;
-      completion_tokens: number | undefined;
-      total_tokens: number | undefined;
-    };
-    choices: {
-      message: Message | undefined;
-      finish_reason: "stop" | "length";
-      index: number | undefined;
-    }[];
-  }> {
+  async fetch(): Promise<FetchResponse> {
     const resp = await this._fetch();
     return await resp.json();
   }
@@ -83,8 +85,7 @@ class Chat {
     return this.messages.slice(-1)[0].content;
   }
-  async complete(): Promise<string> {
-    const resp = await this.fetch();
+  processFetchResponse(resp: FetchResponse): string {
     this.total_tokens = resp?.usage?.total_tokens ?? 0;
     if (resp?.choices[0]?.message) {
       this.messages.push(resp?.choices[0]?.message);
@@ -101,6 +102,11 @@ class Chat {
     );
   }
+  async complete(): Promise<string> {
+    const resp = await this.fetch();
+    return this.processFetchResponse(resp);
+  }
   completeWithSteam() {
     this.total_tokens = this.messages
       .map((msg) => this.calculate_token_length(msg.content) + 20)
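
For the streaming branch dispatched above, _completeWithStreamMode drains the body with response.body?.getReader(). A minimal, self-contained sketch of that reading loop follows, using only the standard Fetch/Streams API; the component's actual accumulation into allChunkMessage and its SSE chunk parsing are omitted here:

async function readAll(response: Response): Promise<string> {
  const reader = response.body?.getReader();
  if (!reader) return "";
  const decoder = new TextDecoder();
  const chunks: string[] = [];
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    // stream: true keeps multi-byte characters that span
    // chunk boundaries intact across decode calls.
    chunks.push(decoder.decode(value, { stream: true }));
  }
  return chunks.join("");
}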