import { createRef } from "preact";
import { StateUpdater, useEffect, useState } from "preact/hooks";
import { ChatStore, addTotalCost } from "./app";
import ChatGPT, {
  calculate_token_length,
  ChunkMessage,
  FetchResponse,
} from "./chatgpt";
import Message from "./message";
import models from "./models";
import Settings from "./settings";

/**
 * Main chat panel: renders the message history for the currently selected
 * chat, sends user input to the ChatGPT API client (stream or fetch mode),
 * and keeps token/cost bookkeeping in `chatStore` in sync.
 *
 * Props:
 * - chatStore / setChatStore: the active chat session state and its setter.
 * - selectedChatIndex / setSelectedChatIndex: index of the active session;
 *   re-set after each completion to trigger a parent refresh.
 */
export default function ChatBOX(props: {
  chatStore: ChatStore;
  setChatStore: (cs: ChatStore) => void;
  selectedChatIndex: number;
  setSelectedChatIndex: StateUpdater;
}) {
  const { chatStore, setChatStore } = props;
  // prevent error
  if (chatStore === undefined) return;
  const [inputMsg, setInputMsg] = useState("");
  const [showGenerating, setShowGenerating] = useState(false);
  const [generatingMessage, setGeneratingMessage] = useState("");
  const [showRetry, setShowRetry] = useState(false);
  const messagesEndRef = createRef();
  // Auto-scroll to the bottom whenever generation state or the streamed
  // text changes.
  // NOTE(review): `messagesEndRef.current` is dereferenced without a null
  // check — this throws if the ref is not attached on first render; confirm
  // the ref is always mounted before these deps change.
  useEffect(() => {
    console.log("ref", messagesEndRef);
    messagesEndRef.current.scrollIntoView({ behavior: "smooth" });
  }, [showRetry, showGenerating, generatingMessage]);
  const client = new ChatGPT(chatStore.apiKey);

  // Re-derive the total token estimate from the system message plus all
  // visible (non-hidden) history entries after the cut point
  // (`postBeginIndex`), mirroring the result into both the client and the
  // store.
  const update_total_tokens = () => {
    // manually estimate token
    client.total_tokens = calculate_token_length(
      chatStore.systemMessageContent
    );
    for (const msg of chatStore.history
      .filter(({ hide }) => !hide)
      .slice(chatStore.postBeginIndex)) {
      client.total_tokens += msg.token;
    }
    chatStore.totalTokens = client.total_tokens;
  };

  // Consume a server-sent-events style streaming response: decode chunks,
  // parse each `data: {...}` line as a ChunkMessage, accumulate the delta
  // text into `generatingMessage`, then push the final assistant message to
  // history and update cost/token bookkeeping.
  // NOTE(review): the ReadableStream `start()` promise is never awaited, so
  // this function returns before streaming finishes; callers `await` it but
  // that await resolves immediately — confirm this is intentional.
  const _completeWithStreamMode = async (response: Response) => {
    let responseTokenCount = 0;
    chatStore.streamMode = true;
    // call api, return response text
    console.log("response", response);
    const reader = response.body?.getReader();
    const allChunkMessage: string[] = [];
    new ReadableStream({
      async start() {
        // Carries a partial (unparseable) trailing line over to the next
        // decoded chunk.
        let lastText = "";
        while (true) {
          let responseDone = false;
          let state = await reader?.read();
          let done = state?.done;
          let value = state?.value;
          if (done) break;
          let text = lastText + new TextDecoder().decode(value);
          // console.log("text:", text);
          const lines = text
            .trim()
            .split("\n")
            .map((line) => line.trim())
            .filter((i) => {
              if (!i) return false;
              if (i === "data: [DONE]" || i === "data:[DONE]") {
                responseDone = true;
                // NOTE(review): the [DONE] sentinel is counted as a
                // completion token here even though it carries no content —
                // verify this is the intended estimate.
                responseTokenCount += 1;
                return false;
              }
              return true;
            });
          // Token estimate: one token per received data line (one line per
          // streamed delta).
          responseTokenCount += lines.length;
          console.log("lines", lines);
          const jsons: ChunkMessage[] = lines
            .map((line) => {
              try {
                const ret = JSON.parse(line.trim().slice("data:".length));
                lastText = "";
                return ret;
              } catch (e) {
                // Incomplete JSON — stash the fragment and retry once the
                // next chunk arrives.
                console.log(`Chunk parse error at: ${line}`);
                lastText = line;
                return null;
              }
            })
            .filter((i) => i);
          console.log("jsons", jsons);
          // Remember which model actually answered (reported per chunk).
          for (const { model } of jsons) {
            if (model) chatStore.responseModelName = model;
          }
          const chunkText = jsons
            .map((j) => j.choices[0].delta.content ?? "")
            .join("");
          // console.log("chunk text", chunkText);
          allChunkMessage.push(chunkText);
          setShowGenerating(true);
          setGeneratingMessage(allChunkMessage.join(""));
          if (responseDone) break;
        }
        setShowGenerating(false);
        // console.log("push to history", allChunkMessage);
        const content = allChunkMessage.join("");
        // estimate cost: completion tokens at the completion rate plus all
        // visible prompt tokens at the prompt rate (0 if model unknown)
        let cost = 0;
        if (chatStore.responseModelName) {
          cost +=
            responseTokenCount *
            (models[chatStore.responseModelName]?.price?.completion ?? 0);
          let sum = 0;
          for (const msg of chatStore.history
            .filter(({ hide }) => !hide)
            .slice(chatStore.postBeginIndex)) {
            sum += msg.token;
          }
          cost +=
            sum * (models[chatStore.responseModelName]?.price?.prompt ?? 0);
        }
        chatStore.cost += cost;
        addTotalCost(cost);
        chatStore.history.push({
          role: "assistant",
          content,
          hide: false,
          token: responseTokenCount,
        });
        // manually copy status from client to chatStore
        chatStore.maxTokens = client.max_tokens;
        chatStore.tokenMargin = client.tokens_margin;
        update_total_tokens();
        setChatStore({ ...chatStore });
        setGeneratingMessage("");
        setShowGenerating(false);
      },
    });
  };

  // Consume a non-streaming JSON response: record cost from the reported
  // usage, back-fill the last visible user message's token count from
  // `prompt_tokens`, and append the assistant reply to history.
  const _completeWithFetchMode = async (response: Response) => {
    chatStore.streamMode = false;
    const data = (await response.json()) as FetchResponse;
    chatStore.responseModelName = data.model ?? "";
    if (data.model) {
      let cost = 0;
      cost +=
        (data.usage.prompt_tokens ?? 0) *
        (models[data.model]?.price?.prompt ?? 0);
      cost +=
        (data.usage.completion_tokens ?? 0) *
        (models[data.model]?.price?.completion ?? 0);
      chatStore.cost += cost;
      addTotalCost(cost);
    }
    const content = client.processFetchResponse(data);
    // estimate user's input message token: prompt_tokens minus the tokens
    // of every visible message above the last one
    let aboveToken = 0;
    for (const msg of chatStore.history
      .filter(({ hide }) => !hide)
      .slice(chatStore.postBeginIndex, -1)) {
      aboveToken += msg.token;
    }
    if (data.usage.prompt_tokens) {
      const userMessageToken = data.usage.prompt_tokens - aboveToken;
      console.log("set user message token");
      // correct the last visible message's estimated token count with the
      // API-reported value
      if (chatStore.history.filter((msg) => !msg.hide).length > 0) {
        chatStore.history.filter((msg) => !msg.hide).slice(-1)[0].token =
          userMessageToken;
      }
    }
    chatStore.history.push({
      role: "assistant",
      content,
      hide: false,
      token: data.usage.completion_tokens ?? calculate_token_length(content),
    });
    setShowGenerating(false);
  };

  // wrap the actual complete api: sync chatStore -> client, fire the
  // request, dispatch on content-type to stream/fetch handler, then sync
  // client -> chatStore. On failure shows the retry button and alerts.
  const complete = async () => {
    // manually copy status from chatStore to client
    client.apiEndpoint = chatStore.apiEndpoint;
    client.sysMessageContent = chatStore.systemMessageContent;
    client.tokens_margin = chatStore.tokenMargin;
    client.temperature = chatStore.temperature;
    client.top_p = chatStore.top_p;
    client.frequency_penalty = chatStore.frequency_penalty;
    client.presence_penalty = chatStore.presence_penalty;
    client.messages = chatStore.history
      // only copy non hidden message
      .filter(({ hide }) => !hide)
      .slice(chatStore.postBeginIndex)
      // only copy content and role attribute to client for posting
      .map(({ content, role }) => {
        return {
          content,
          role,
        };
      });
    client.model = chatStore.model;
    client.max_tokens = chatStore.maxTokens;
    try {
      setShowGenerating(true);
      const response = await client._fetch(chatStore.streamMode);
      const contentType = response.headers.get("content-type");
      if (contentType?.startsWith("text/event-stream")) {
        await _completeWithStreamMode(response);
      } else if (contentType === "application/json") {
        await _completeWithFetchMode(response);
      } else {
        // NOTE(review): throwing a string, not an Error — callers only
        // alert() it, but an Error would carry a stack trace.
        throw `unknown response content type ${contentType}`;
      }
      // manually copy status from client to chatStore
      chatStore.maxTokens = client.max_tokens;
      chatStore.tokenMargin = client.tokens_margin;
      chatStore.totalTokens = client.total_tokens;
      console.log("postBeginIndex", chatStore.postBeginIndex);
      setShowRetry(false);
      setChatStore({ ...chatStore });
    } catch (error) {
      setShowRetry(true);
      alert(error);
    } finally {
      setShowGenerating(false);
      // re-select the current chat so the parent list refreshes
      props.setSelectedChatIndex(props.selectedChatIndex);
    }
  };

  // when user click the "send" button or ctrl+Enter in the textarea:
  // push the trimmed user message to history, update token estimates,
  // clear the input box and trigger a completion.
  const send = async (msg = "") => {
    const inputMsg = msg;
    if (!inputMsg) {
      console.log("empty message");
      return;
    }
    chatStore.responseModelName = "";
    chatStore.history.push({
      role: "user",
      content: inputMsg.trim(),
      hide: false,
      token: calculate_token_length(inputMsg.trim()),
    });
    // manually calculate token length
    // NOTE(review): uses `client.calculate_token_length` here but the
    // module-level `calculate_token_length` just above — confirm both refer
    // to the same estimator.
    chatStore.totalTokens += client.calculate_token_length(inputMsg.trim());
    client.total_tokens += client.calculate_token_length(inputMsg.trim());
    setChatStore({ ...chatStore });
    setInputMsg(""); 
    await complete();
  };
  const [showSettings, setShowSettings] = useState(false);
  // NOTE(review): the JSX below appears corrupted by text extraction — all
  // element tags are missing (only expression containers and text children
  // remain), so it is preserved verbatim rather than reconstructed.
  return (
{showSettings && ( )}

setShowSettings(true)} >

{" "}
{chatStore.model}{" "} Tokens:{" "} {chatStore.totalTokens}/{chatStore.maxTokens} {" "} Cut:{" "} {chatStore.postBeginIndex}/ {chatStore.history.filter(({ hide }) => !hide).length} {" "} {" "} Cost:{" "} ${chatStore.cost.toFixed(4)}

{!chatStore.apiKey && (

请先在上方设置 (OPENAI) API KEY

)} {!chatStore.apiEndpoint && (

请先在上方设置 API Endpoint

)} {chatStore.history.length === 0 && (

暂无历史对话记录
⚙Model: {chatStore.model}
⚙Key: {chatStore.apiKey}
⚙Endpoint: {chatStore.apiEndpoint}
⬆点击上方更改此对话的参数(请勿泄漏)
↖点击左上角 NEW 新建对话
请注意,使用 ChatGPT API 的生成文本质量和速度会受到会话上下文的影响,同时历史上下文过长会被裁切。API 会根据发送的上下文总量进行计费,因此建议您为不相关的问题或者不需要上文的问题创建新的对话,以避免不必要的计费。
⚠所有历史对话与参数储存在浏览器本地
⚠详细文档与源代码:{" "} github.com/heimoshuiyu/chatgpt-api-web

)} {chatStore.history.map((_, messageIndex) => ( ))} {showGenerating && (

{generatingMessage || "生成中,最长可能需要一分钟,请保持网络稳定"} ...

)} {chatStore.develop_mode && (

)}

{chatStore.responseModelName && ( <>Generated by {chatStore.responseModelName} )} {chatStore.postBeginIndex !== 0 && ( <>
提示:会话过长,已裁切前 {chatStore.postBeginIndex} 条消息 )}

{chatStore.chatgpt_api_web_version < "v1.3.0" && (


提示:当前会话版本 {chatStore.chatgpt_api_web_version}。
v1.3.0 引入与旧版不兼容的消息裁切算法。继续使用旧版可能会导致消息裁切过多或过少(表现为失去上下文或输出不完整)。
请在左上角创建新会话:)

)} {chatStore.chatgpt_api_web_version < "v1.4.0" && (


提示:当前会话版本 {chatStore.chatgpt_api_web_version} {"< v1.4.0"} 。
v1.4.0 增加了更多参数,继续使用旧版可能因参数确实导致未定义的行为
请在左上角创建新会话:)

)} {showRetry && (

)}
{chatStore.develop_mode && ( )} {chatStore.develop_mode && ( )}
); }