change to stream API mode

Switch chat completion from a single blocking request to fetch()-based SSE streaming: add a shared _fetch() request builder and a completeWithStream() entry point in chatgpt, and render partial output in the UI as chunks arrive.
--- a/src/app.tsx
+++ b/src/app.tsx
@@ -1,7 +1,7 @@
 import { useEffect, useState } from "preact/hooks";
 import "./global.css";
 
-import ChatGPT, { Message } from "./chatgpt";
+import ChatGPT, { Message, ChunkMessage } from "./chatgpt";
 import { createRef } from "preact";
 
 export interface ChatStore {
@@ -79,6 +79,7 @@ export function App() {
 
   const [inputMsg, setInputMsg] = useState("");
   const [showGenerating, setShowGenerating] = useState(false);
+  const [generatingMessage, setGeneratingMessage] = useState("");
 
   const client = new ChatGPT(chatStore.apiKey);
 
@@ -89,8 +90,65 @@ export function App() {
     client.messages = chatStore.history.slice(chatStore.postBeginIndex);
 
     // call api, return response text
-    const response = await client.complete();
-    chatStore.history.push({ role: "assistant", content: response });
+    const response = await client.completeWithStream();
+    console.log("response", response);
+    const reader = response.body?.getReader();
+    const allChunkMessage: string[] = [];
+    await new ReadableStream({
+      async start(controller) {
+        while (true) {
+          let responseDone = false;
+          let state = await reader?.read();
+          let done = state?.done;
+          let value = state?.value;
+          if (done) break;
+          let text = new TextDecoder().decode(value);
+          // console.log("text:", text);
+          const lines = text
+            .trim()
+            .split("\n")
+            .map((line) => line.trim())
+            .filter((i) => {
+              if (!i) return false;
+              if (i === "data: [DONE]") {
+                responseDone = true;
+                return false;
+              }
+              return true;
+            });
+          console.log("lines", lines);
+          const jsons: ChunkMessage[] = lines
+            .map((line) => {
+              return JSON.parse(line.trim().slice("data: ".length));
+            })
+            .filter((i) => i);
+          // console.log("jsons", jsons);
+          const chunkText = jsons
+            .map((j) => j.choices[0].delta.content ?? "")
+            .join("");
+          // console.log("chunk text", chunkText);
+          allChunkMessage.push(chunkText);
+          setGeneratingMessage(allChunkMessage.join(""));
+          if (responseDone) break;
+        }
+
+        // console.log("push to history", allChunkMessage);
+        chatStore.history.push({
+          role: "assistant",
+          content: allChunkMessage.join(""),
+        });
+        // manually copy status from client to chatStore
+        chatStore.maxTokens = client.max_tokens;
+        chatStore.tokenMargin = client.tokens_margin;
+        chatStore.totalTokens =
+          client.total_tokens +
+          39 +
+          client.calculate_token_length(allChunkMessage.join(""));
+        setChatStore({ ...chatStore });
+        setGeneratingMessage("");
+        setShowGenerating(false);
+      },
+    });
 
     // manually copy status from client to chatStore
     chatStore.maxTokens = client.max_tokens;
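For context on the parsing above: with `stream: true` the completions endpoint returns a `text/event-stream` body in which each non-empty line is a JSON chunk prefixed with `data: `, terminated by a literal `data: [DONE]`. A minimal standalone sketch of that step (the function name is illustrative, not part of this commit; like the committed code, it assumes each read() yields whole lines):

    // Extract delta strings from one decoded SSE text block.
    // Assumes complete "data: ..." lines per block; a production parser
    // would buffer partial lines across reads.
    function parseSSEBlock(text: string): string[] {
      return text
        .trim()
        .split("\n")
        .map((line) => line.trim())
        .filter((line) => line && line !== "data: [DONE]")
        .map((line) => JSON.parse(line.slice("data: ".length)))
        .map((chunk) => chunk.choices[0]?.delta?.content ?? "");
    }

    // Example block, shaped like the API's stream:
    //   data: {"choices":[{"delta":{"content":"Hel"}}]}
    //   data: {"choices":[{"delta":{"content":"lo"}}]}
    //   data: [DONE]
    // parseSSEBlock(...) -> ["Hel", "lo"]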
@@ -112,8 +170,6 @@ export function App() {
       await _complete();
     } catch (error) {
       alert(error);
-    } finally {
-      setShowGenerating(false);
     }
   };
 
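Note: the `finally` cleanup removed here moves into the stream's `start()` callback above, so the generating indicator stays visible until the whole stream has been consumed.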
@@ -304,7 +360,10 @@ export function App() {
           );
         })}
         {showGenerating && (
-          <p className="animate-pulse">Generating... please wait...</p>
+          <p className="p-2 my-2 animate-pulse">
+            {generatingMessage ? generatingMessage : "Generating, keep the network stable, meow"}
+            ...
+          </p>
         )}
       </div>
       <div className="flex justify-between">
--- a/src/chatgpt.ts
+++ b/src/chatgpt.ts
@@ -3,6 +3,12 @@ export interface Message {
   content: string;
 }
 
+export interface ChunkMessage {
+  choices: {
+    delta: { role: "assistant" | undefined; content: string | undefined };
+  }[];
+}
+
 class Chat {
   OPENAI_API_KEY: string;
   messages: Message[];
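For reference, a chunk that matches the `ChunkMessage` shape above (a hand-written example, not captured API output):

    const example: ChunkMessage = {
      choices: [{ delta: { role: "assistant", content: "Hel" } }],
    };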
@@ -33,6 +39,24 @@ class Chat {
     this.apiEndpoint = apiEndPoint;
   }
 
+  _fetch(stream = false) {
+    return fetch(this.apiEndpoint, {
+      method: "POST",
+      headers: {
+        Authorization: `Bearer ${this.OPENAI_API_KEY}`,
+        "Content-Type": "application/json",
+      },
+      body: JSON.stringify({
+        model: "gpt-3.5-turbo",
+        messages: [
+          { role: "system", content: this.sysMessageContent },
+          ...this.messages,
+        ],
+        stream,
+      }),
+    });
+  }
+
   async fetch(): Promise<{
     id: string;
     object: string;
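Why `_fetch` takes a `stream` flag (a small fix over hard-coding `stream: true`, which would break the non-streaming path): with `stream: true` the endpoint answers with `text/event-stream` rather than one JSON document, so `resp.json()` only works when the flag is off. Illustrative use of both paths, assuming `client` is a ChatGPT instance:

    // Non-streaming: a single JSON document, safe to parse with resp.json().
    const full = await client._fetch();    // stream defaults to false
    const body = await full.json();
    console.log(body.choices[0].message.content);

    // Streaming: an SSE body that must be read incrementally.
    const sse = await client._fetch(true); // text/event-stream
    const reader = sse.body?.getReader();  // consume chunk by chunk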
@@ -49,21 +73,8 @@ class Chat {
       index: number | undefined;
     }[];
   }> {
-    const resp = await fetch(this.apiEndpoint, {
-      method: "POST",
-      headers: {
-        Authorization: `Bearer ${this.OPENAI_API_KEY}`,
-        "Content-Type": "application/json",
-      },
-      body: JSON.stringify({
-        model: "gpt-3.5-turbo",
-        messages: [
-          { role: "system", content: this.sysMessageContent },
-          ...this.messages,
-        ],
-      }),
-    }).then((resp) => resp.json());
-    return resp;
+    const resp = await this._fetch();
+    return await resp.json();
   }
 
   async say(content: string): Promise<string> {
@@ -90,6 +101,14 @@ class Chat {
     );
   }
 
+  completeWithStream() {
+    this.total_tokens =
+      this.messages
+        .map((msg) => this.calculate_token_length(msg.content) + 20)
+        .reduce((a, v) => a + v);
+    return this._fetch(true);
+  }
+
   // https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
   calculate_token_length(content: string): number {
     const totalCount = content.length;
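Taken together, the new streaming path can be consumed roughly like this (a hedged end-to-end sketch; `streamReply` and `onDelta` are illustrative names, not part of the commit):

    import ChatGPT from "./chatgpt";

    // Drive completeWithStream() and surface partial output via a callback.
    async function streamReply(
      client: ChatGPT,
      onDelta: (partial: string) => void // e.g. setGeneratingMessage in App
    ): Promise<string> {
      const resp = await client.completeWithStream();
      const reader = resp.body!.getReader();
      const decoder = new TextDecoder();
      const parts: string[] = [];
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        for (const raw of decoder.decode(value).trim().split("\n")) {
          const line = raw.trim();
          if (!line || line === "data: [DONE]") continue;
          const chunk = JSON.parse(line.slice("data: ".length));
          parts.push(chunk.choices[0]?.delta?.content ?? "");
          onDelta(parts.join(""));
        }
      }
      return parts.join(""); // caller pushes this into chatStore.history
    }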