Compare commits

...

38 Commits

Author SHA1 Message Date
67e12e6933 change default model to gpt-3.5-turbo-0613 2023-06-14 12:16:22 +08:00
4860c6dff3 inherit chatStore.model 2023-06-14 12:14:57 +08:00
e03160d04d openai model update 2023-06-14 12:10:18 +08:00
b46b550a70 recognize text/event-stream, charset=utf-8 2023-04-19 16:21:05 +08:00
8f1a327ea0 fix total token in stream mode 2023-04-04 00:39:58 +08:00
8c049c9ee9 handle response error 2023-04-03 17:46:07 +08:00
528eb0a300 永远显示提示<p> 2023-04-03 17:46:07 +08:00
d5d077f39c Update README.md 2023-04-02 13:01:38 +08:00
b4244d3900 现在我觉得它不灵车了 2023-04-01 19:57:59 +08:00
8f3d69d2a2 fix todo: estimate user's token 2023-04-01 12:35:26 +08:00
11d9b09e36 prevent undefined on new models 2023-03-31 15:00:57 +08:00
464e417537 switch model with token 2023-03-31 05:14:44 +08:00
5a328db87d cost in stream mode 2023-03-31 05:09:25 +08:00
3b09abaf66 header 2023-03-31 05:00:52 +08:00
05f57f29e5 show cost 2023-03-31 04:50:49 +08:00
11afa12b09 fix show version warning 2023-03-31 04:41:08 +08:00
26f9632f41 record each message's token and hide status, calc postBeginIndex based on token 2023-03-31 04:16:23 +08:00
bdfe03699f support import chat store 2023-03-30 14:33:54 +08:00
fecfc24519 support export as json 2023-03-30 13:56:03 +08:00
07885c681c show response model name 2023-03-30 13:39:19 +08:00
faac2303df gpt-4 内测提示 2023-03-30 13:01:28 +08:00
fc17d6ba15 fix copy max_token from chaStore to client 2023-03-29 16:42:57 +08:00
3de689a796 use gpt-3.5-turbo as default new ChatStore 2023-03-29 15:51:04 +08:00
35ee9cab0e highlight status bar 2023-03-29 15:48:49 +08:00
5fc2c62b4f select chat index after fetch 2023-03-29 15:37:36 +08:00
c31c6cd84a set maxToken based on model 2023-03-29 13:02:48 +08:00
6406993e83 handle create new chatstore on diff model 2023-03-29 12:51:31 +08:00
2d7edeb5b0 support gpt-4 2023-03-29 12:45:59 +08:00
1158fdca38 update stream mode based on response 2023-03-28 21:18:30 +08:00
7c34379ecb calculate token and forget some message 2023-03-28 21:12:34 +08:00
26a66d112b create chatStore if not equal params 2023-03-27 13:43:06 +08:00
e791367d2d break all text 2023-03-27 13:18:30 +08:00
30abf3ed15 auto create new chatStore if there any params in URL 2023-03-27 13:15:23 +08:00
146f34a22d 更好的提示 2023-03-26 21:00:28 +08:00
d5a8799fde issue caused by height: 100vh 2023-03-26 19:30:03 +08:00
241a93b151 更改默认 system message 2023-03-26 18:36:51 +08:00
a4b762586c 更好的文档和提示 2023-03-26 14:04:53 +08:00
700c424d64 更好的提示 2023-03-26 00:11:11 +08:00
9 changed files with 545 additions and 95 deletions

View File

@@ -1,9 +1,11 @@
> 前排提示:滥用 API 或在不支持的地区调用 API 有被封号的风险 <https://github.com/zhayujie/chatgpt-on-wechat/issues/423>
>
> 建议自行搭建代理中转 API 请求,然后更改对话设置中的 API Endpoint 参数使用中转
>
> 具体反向代理搭建教程请参阅此 [>>Wiki页面<<](https://github.com/heimoshuiyu/chatgpt-api-web/wiki)
# ChatGPT API WEB
> 灵车东西,做着玩儿的
一个简单的网页,调用 OPENAI ChatGPT 进行对话。
![build status](https://github.com/heimoshuiyu/chatgpt-api-web/actions/workflows/pages.yml/badge.svg)
@@ -13,7 +15,14 @@
- API 调用速度更快更稳定
- 对话记录、API 密钥等使用浏览器的 localStorage 保存在本地
- 可删除对话消息
- 可以设置 system message (如:"你是一个猫娘" 或 "你是一个有用的助理" 或 "将我的话翻译成英语",参见官方 [API 文档](https://platform.openai.com/docs/guides/chat))
- 可以导入/导出整个历史对话记录
- 可以设置 system message (参见官方 [API 文档](https://platform.openai.com/docs/guides/chat)) 例如:
- > 你是一个有用的有用的人工智能助理
- > You are a helpful assistant
- > 你是一个专业英语翻译,把我说的话翻译成英语,为了保持通顺连贯可以适当修改内容。
- > 根据我的描述给出适用于 Stable Diffusion 的 prompt 和 negative prompt,用英文回答,要求尽量长一些。
- > 根据我的要求撰写并修改商业文案
- > ~~你是一个猫娘,你要用猫娘的语气说话~~
- 可以为不同对话设置不同 APIKEY
- 小(整个网页 30k 左右)
- 可以设置不同的 API Endpoint(方便墙内人士使用反向代理转发 API 请求)
@@ -30,6 +39,12 @@
- 从 [release](https://github.com/heimoshuiyu/chatgpt-api-web/releases) 下载网页文件,或在 [github pages](https://heimoshuiyu.github.io/chatgpt-api-web/) 按 `ctrl+s` 保存网页,然后双击打开
- 自行编译构建网页
### 默认参数继承
新建会话将会使用 URL 中设置的默认参数。
如果 URL 没有设置该参数,则使用 **目前选中的会话** 的参数
### 更改默认参数
- `key`: OPENAI API KEY 默认为空
@@ -48,4 +63,4 @@ yarn install
yarn build
```
构建产物在 `dist` 文件夹中
构建产物在 `dist` 文件夹中

View File

@@ -0,0 +1,3 @@
const CHATGPT_API_WEB_VERSION = "v1.3.0";
export default CHATGPT_API_WEB_VERSION;

View File

@@ -1,13 +1,22 @@
import { useEffect, useState } from "preact/hooks";
import "./global.css";
import { Message } from "./chatgpt";
import { calculate_token_length, Message } from "./chatgpt";
import getDefaultParams from "./getDefaultParam";
import ChatBOX from "./chatbox";
import models from "./models";
import CHATGPT_API_WEB_VERSION from "./CHATGPT_API_WEB_VERSION";
export interface ChatStoreMessage extends Message {
hide: boolean;
token: number;
}
export interface ChatStore {
chatgpt_api_web_version: string;
systemMessageContent: string;
history: Message[];
history: ChatStoreMessage[];
postBeginIndex: number;
tokenMargin: number;
totalTokens: number;
@@ -15,25 +24,33 @@ export interface ChatStore {
apiKey: string;
apiEndpoint: string;
streamMode: boolean;
model: string;
responseModelName: string;
cost: number;
}
const _defaultAPIEndpoint = "https://api.openai.com/v1/chat/completions";
const newChatStore = (
apiKey = "",
systemMessageContent = "你是一个有用的人工智能助理",
systemMessageContent = "你是一个有用的人工智能助理,根据我的提问和要求回答我的问题",
apiEndpoint = _defaultAPIEndpoint,
streamMode = true
streamMode = true,
model = "gpt-3.5-turbo-0613"
): ChatStore => {
return {
chatgpt_api_web_version: CHATGPT_API_WEB_VERSION,
systemMessageContent: getDefaultParams("sys", systemMessageContent),
history: [],
postBeginIndex: 0,
tokenMargin: 1024,
totalTokens: 0,
maxTokens: 4096,
maxTokens: models[getDefaultParams("model", model)]?.maxToken ?? 4096,
apiKey: getDefaultParams("key", apiKey),
apiEndpoint: getDefaultParams("api", apiEndpoint),
streamMode: getDefaultParams("mode", streamMode),
model: getDefaultParams("model", model),
responseModelName: "",
cost: 0,
};
};
@@ -71,49 +88,104 @@ export function App() {
const key = `${STORAGE_NAME}-${index}`;
const val = localStorage.getItem(key);
if (val === null) return newChatStore();
return JSON.parse(val) as ChatStore;
const ret = JSON.parse(val) as ChatStore;
// handle read from old version chatstore
if (ret.model === undefined) ret.model = "gpt-3.5-turbo";
if (ret.responseModelName === undefined) ret.responseModelName = "";
if (ret.chatgpt_api_web_version === undefined)
// this is from old version because it is undefined,
// so no higher than v1.3.0
ret.chatgpt_api_web_version = "v1.2.2";
for (const message of ret.history) {
if (message.hide === undefined) message.hide = false;
if (message.token === undefined)
message.token = calculate_token_length(message.content);
}
if (ret.cost === undefined) ret.cost = 0;
return ret;
};
const [chatStore, _setChatStore] = useState(
getChatStoreByIndex(selectedChatIndex)
);
const setChatStore = (cs: ChatStore) => {
const setChatStore = (chatStore: ChatStore) => {
console.log("saved chat", selectedChatIndex, chatStore);
localStorage.setItem(
`${STORAGE_NAME}-${selectedChatIndex}`,
JSON.stringify(cs)
JSON.stringify(chatStore)
);
_setChatStore(cs);
console.log("recalculate postBeginIndex");
const max = chatStore.maxTokens - chatStore.tokenMargin;
let sum = 0;
chatStore.postBeginIndex = chatStore.history.filter(
({ hide }) => !hide
).length;
for (const msg of chatStore.history
.filter(({ hide }) => !hide)
.slice()
.reverse()) {
if (sum + msg.token > max) break;
sum += msg.token;
chatStore.postBeginIndex -= 1;
}
chatStore.postBeginIndex =
chatStore.postBeginIndex < 0 ? 0 : chatStore.postBeginIndex;
_setChatStore(chatStore);
};
useEffect(() => {
_setChatStore(getChatStoreByIndex(selectedChatIndex));
}, [selectedChatIndex]);
const handleNewChatStore = () => {
const max = Math.max(...allChatStoreIndexes);
const next = max + 1;
console.log("save next chat", next);
localStorage.setItem(
`${STORAGE_NAME}-${next}`,
JSON.stringify(
newChatStore(
chatStore.apiKey,
chatStore.systemMessageContent,
chatStore.apiEndpoint,
chatStore.streamMode,
chatStore.model
)
)
);
allChatStoreIndexes.push(next);
setAllChatStoreIndexes([...allChatStoreIndexes]);
setSelectedChatIndex(next);
};
// if there are any params in URL, create a new chatStore
useEffect(() => {
const api = getDefaultParams("api", "");
const key = getDefaultParams("key", "");
const sys = getDefaultParams("sys", "");
const mode = getDefaultParams("mode", "");
const model = getDefaultParams("model", "");
// only create new chatStore if the params in URL are NOT
// equal to the current selected chatStore
if (
(api && api !== chatStore.apiEndpoint) ||
(key && key !== chatStore.apiKey) ||
(sys && sys !== chatStore.systemMessageContent) ||
(mode && mode !== (chatStore.streamMode ? "stream" : "fetch")) ||
(model && model !== chatStore.model)
) {
handleNewChatStore();
}
}, []);
return (
<div className="flex text-sm h-screen bg-slate-200 dark:bg-slate-800 dark:text-white">
<div className="flex text-sm h-full bg-slate-200 dark:bg-slate-800 dark:text-white">
<div className="flex flex-col h-full p-2 border-r-indigo-500 border-2 dark:border-slate-800 dark:border-r-indigo-500 dark:text-black">
<div className="grow overflow-scroll">
<button
className="bg-violet-300 p-1 rounded hover:bg-violet-400"
onClick={() => {
const max = Math.max(...allChatStoreIndexes);
const next = max + 1;
console.log("save next chat", next);
localStorage.setItem(
`${STORAGE_NAME}-${next}`,
JSON.stringify(
newChatStore(
chatStore.apiKey,
chatStore.systemMessageContent,
chatStore.apiEndpoint,
chatStore.streamMode
)
)
);
allChatStoreIndexes.push(next);
setAllChatStoreIndexes([...allChatStoreIndexes]);
setSelectedChatIndex(next);
}}
onClick={handleNewChatStore}
>
NEW
</button>
@@ -126,7 +198,7 @@ export function App() {
return (
<li>
<button
className={`w-full my-1 p-1 rounded hover:bg-blue-300 ${
className={`w-full my-1 p-1 rounded hover:bg-blue-500 ${
i === selectedChatIndex ? "bg-blue-500" : "bg-blue-200"
}`}
onClick={() => {
@@ -158,7 +230,8 @@ export function App() {
chatStore.apiKey,
chatStore.systemMessageContent,
chatStore.apiEndpoint,
chatStore.streamMode
chatStore.streamMode,
chatStore.model
)
);
}
@@ -175,7 +248,12 @@ export function App() {
DEL
</button>
</div>
<ChatBOX chatStore={chatStore} setChatStore={setChatStore} />
<ChatBOX
chatStore={chatStore}
setChatStore={setChatStore}
selectedChatIndex={selectedChatIndex}
setSelectedChatIndex={setSelectedChatIndex}
/>
</div>
);
}

View File

@@ -1,13 +1,20 @@
import { createRef } from "preact";
import { useEffect, useState } from "preact/hooks";
import { StateUpdater, useEffect, useState } from "preact/hooks";
import type { ChatStore } from "./app";
import ChatGPT, { ChunkMessage, FetchResponse } from "./chatgpt";
import ChatGPT, {
calculate_token_length,
ChunkMessage,
FetchResponse,
} from "./chatgpt";
import Message from "./message";
import models from "./models";
import Settings from "./settings";
export default function ChatBOX(props: {
chatStore: ChatStore;
setChatStore: (cs: ChatStore) => void;
selectedChatIndex: number;
setSelectedChatIndex: StateUpdater<number>;
}) {
const { chatStore, setChatStore } = props;
// prevent error
@@ -26,6 +33,7 @@ export default function ChatBOX(props: {
const client = new ChatGPT(chatStore.apiKey);
const _completeWithStreamMode = async (response: Response) => {
chatStore.streamMode = true;
// call api, return response text
console.log("response", response);
const reader = response.body?.getReader();
@@ -58,7 +66,10 @@ export default function ChatBOX(props: {
return JSON.parse(line.trim().slice("data: ".length));
})
.filter((i) => i);
// console.log("jsons", jsons);
console.log("jsons", jsons);
for (const { model } of jsons) {
if (model) chatStore.responseModelName = model;
}
const chunkText = jsons
.map((j) => j.choices[0].delta.content ?? "")
.join("");
@@ -71,17 +82,39 @@ export default function ChatBOX(props: {
setShowGenerating(false);
// console.log("push to history", allChunkMessage);
const content = allChunkMessage.join("");
const token = calculate_token_length(content);
// estimate cost
if (chatStore.responseModelName) {
chatStore.cost +=
token *
(models[chatStore.responseModelName]?.price?.completion ?? 0);
let sum = 0;
for (const msg of chatStore.history
.filter(({ hide }) => !hide)
.slice(chatStore.postBeginIndex)) {
sum += msg.token;
}
chatStore.cost +=
sum * (models[chatStore.responseModelName]?.price?.prompt ?? 0);
}
chatStore.history.push({
role: "assistant",
content: allChunkMessage.join(""),
content,
hide: false,
token,
});
// manually copy status from client to chatStore
chatStore.maxTokens = client.max_tokens;
chatStore.tokenMargin = client.tokens_margin;
chatStore.totalTokens =
client.total_tokens +
39 +
client.calculate_token_length(allChunkMessage.join(""));
// manually estimate token
client.total_tokens = 0;
for (const msg of chatStore.history
.filter(({ hide }) => !hide)
.slice(chatStore.postBeginIndex)) {
client.total_tokens += msg.token;
}
chatStore.totalTokens = client.total_tokens;
setChatStore({ ...chatStore });
setGeneratingMessage("");
setShowGenerating(false);
@@ -90,9 +123,38 @@ export default function ChatBOX(props: {
};
const _completeWithFetchMode = async (response: Response) => {
chatStore.streamMode = false;
const data = (await response.json()) as FetchResponse;
chatStore.responseModelName = data.model ?? "";
if (data.model) {
chatStore.cost +=
(data.usage.prompt_tokens ?? 0) *
(models[data.model]?.price?.prompt ?? 0);
chatStore.cost +=
(data.usage.completion_tokens ?? 0) *
(models[data.model]?.price?.completion ?? 0);
}
const content = client.processFetchResponse(data);
chatStore.history.push({ role: "assistant", content });
// estimate user's input message token
let aboveToken = 0;
for (const msg of chatStore.history
.filter(({ hide }) => !hide)
.slice(chatStore.postBeginIndex, -1)) {
aboveToken += msg.token;
}
if (data.usage.prompt_tokens) {
const userMessageToken = data.usage.prompt_tokens - aboveToken;
console.log("set user message token");
chatStore.history.slice(-1)[0].token = userMessageToken;
}
chatStore.history.push({
role: "assistant",
content,
hide: false,
token: data.usage.completion_tokens ?? calculate_token_length(content),
});
setShowGenerating(false);
};
@@ -101,12 +163,26 @@ export default function ChatBOX(props: {
// manually copy status from chatStore to client
client.apiEndpoint = chatStore.apiEndpoint;
client.sysMessageContent = chatStore.systemMessageContent;
client.messages = chatStore.history.slice(chatStore.postBeginIndex);
client.tokens_margin = chatStore.tokenMargin;
client.messages = chatStore.history
// only copy non hidden message
.filter(({ hide }) => !hide)
.slice(chatStore.postBeginIndex)
// only copy content and role attribute to client for posting
.map(({ content, role }) => {
return {
content,
role,
};
});
client.model = chatStore.model;
client.max_tokens = chatStore.maxTokens;
try {
setShowGenerating(true);
const response = await client._fetch(chatStore.streamMode);
const contentType = response.headers.get("content-type");
if (contentType === "text/event-stream") {
if (contentType?.startsWith("text/event-stream")) {
await _completeWithStreamMode(response);
} else if (contentType === "application/json") {
await _completeWithFetchMode(response);
@@ -117,11 +193,7 @@ export default function ChatBOX(props: {
chatStore.maxTokens = client.max_tokens;
chatStore.tokenMargin = client.tokens_margin;
chatStore.totalTokens = client.total_tokens;
// when total token > max token - margin token:
// ChatGPT will "forgot" some historical message
// so client.message.length will be less than chatStore.history.length
chatStore.postBeginIndex =
chatStore.history.length - client.messages.length;
console.log("postBeginIndex", chatStore.postBeginIndex);
setChatStore({ ...chatStore });
} catch (error) {
@@ -129,6 +201,7 @@ export default function ChatBOX(props: {
alert(error);
} finally {
setShowGenerating(false);
props.setSelectedChatIndex(props.selectedChatIndex);
}
};
@@ -139,7 +212,16 @@ export default function ChatBOX(props: {
console.log("empty message");
return;
}
chatStore.history.push({ role: "user", content: inputMsg.trim() });
chatStore.responseModelName = "";
chatStore.history.push({
role: "user",
content: inputMsg.trim(),
hide: false,
token: calculate_token_length(inputMsg.trim()),
});
// manually calculate token length
chatStore.totalTokens += client.calculate_token_length(inputMsg.trim());
client.total_tokens += client.calculate_token_length(inputMsg.trim());
setChatStore({ ...chatStore });
setInputMsg("");
await complete();
@@ -153,9 +235,10 @@ export default function ChatBOX(props: {
setChatStore={setChatStore}
show={showSettings}
setShow={setShowSettings}
selectedChatStoreIndex={props.selectedChatIndex}
/>
<p
className="cursor-pointer dark:text-white"
className="cursor-pointer rounded bg-cyan-300 dark:text-white p-1 dark:bg-cyan-800"
onClick={() => setShowSettings(true)}
>
<div>
@@ -169,11 +252,24 @@ export default function ChatBOX(props: {
</button>
</div>
<div className="text-xs">
<span>Total: {chatStore.totalTokens}</span>{" "}
<span>Max: {chatStore.maxTokens}</span>{" "}
<span>Margin: {chatStore.tokenMargin}</span>{" "}
<span>Message: {chatStore.history.length}</span>{" "}
<span>Cut: {chatStore.postBeginIndex}</span>
<span className="underline">{chatStore.model}</span>{" "}
<span>
Tokens:{" "}
<span className="underline">
{chatStore.totalTokens}/{chatStore.maxTokens}
</span>
</span>{" "}
<span>
Cut:{" "}
<span className="underline">
{chatStore.postBeginIndex}/
{chatStore.history.filter(({ hide }) => !hide).length}
</span>{" "}
</span>{" "}
<span>
Cost:{" "}
<span className="underline">${chatStore.cost.toFixed(4)}</span>
</span>
</div>
</p>
<div className="grow overflow-scroll">
@@ -188,8 +284,33 @@ export default function ChatBOX(props: {
</p>
)}
{chatStore.history.length === 0 && (
<p className="opacity-60 p-6 rounded bg-white my-3 text-left dark:text-black">
<p className="break-all opacity-60 p-6 rounded bg-white my-3 text-left dark:text-black">
<br />
Model: {chatStore.model}
<br />
Key: {chatStore.apiKey}
<br />
Endpoint: {chatStore.apiEndpoint}
<br />
<br />
NEW
<br />
使 ChatGPT API
API
<br />
<br />
:{" "}
<a
className="underline"
href="https://github.com/heimoshuiyu/chatgpt-api-web"
target="_blank"
>
github.com/heimoshuiyu/chatgpt-api-web
</a>
</p>
)}
{chatStore.history.map((_, messageIndex) => (
@@ -203,10 +324,32 @@ export default function ChatBOX(props: {
<p className="p-2 my-2 animate-pulse dark:text-white">
{generatingMessage
? generatingMessage.split("\n").map((line) => <p>{line}</p>)
: "生成中,请保持网络稳定"}
: "生成中,最长可能需要一分钟,请保持网络稳定"}
...
</p>
)}
<p className="p-2 my-2 text-center opacity-50 dark:text-white">
{chatStore.responseModelName && (
<>Generated by {chatStore.responseModelName}</>
)}
{chatStore.postBeginIndex !== 0 && (
<>
<br />
{chatStore.postBeginIndex}
</>
)}
</p>
{chatStore.chatgpt_api_web_version < "v1.3.0" && (
<p className="p-2 my-2 text-center dark:text-white">
<br />
{chatStore.chatgpt_api_web_version}
<br />
v1.3.0
使
<br />
</p>
)}
{showRetry && (
<p className="text-right p-2 my-2 dark:text-white">
<button

View File

@@ -4,12 +4,14 @@ export interface Message {
}
export interface ChunkMessage {
model: string;
choices: {
delta: { role: "assitant" | undefined; content: string | undefined };
}[];
}
export interface FetchResponse {
error?: any;
id: string;
object: string;
created: number;
@@ -25,6 +27,14 @@ export interface FetchResponse {
index: number | undefined;
}[];
}
// https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
export function calculate_token_length(content: string): number {
const totalCount = content.length;
const chineseCount = content.match(/[\u00ff-\uffff]|\S+/g)?.length ?? 0;
const englishCount = totalCount - chineseCount;
const tokenLength = englishCount / 4 + (chineseCount * 4) / 3;
return ~~tokenLength;
}
class Chat {
OPENAI_API_KEY: string;
@@ -34,6 +44,7 @@ class Chat {
max_tokens: number;
tokens_margin: number;
apiEndpoint: string;
model: string;
constructor(
OPENAI_API_KEY: string | undefined,
@@ -42,6 +53,7 @@ class Chat {
max_tokens = 4096,
tokens_margin = 1024,
apiEndPoint = "https://api.openai.com/v1/chat/completions",
model = "gpt-3.5-turbo",
} = {}
) {
if (OPENAI_API_KEY === undefined) {
@@ -54,6 +66,7 @@ class Chat {
this.tokens_margin = tokens_margin;
this.sysMessageContent = systemMessage;
this.apiEndpoint = apiEndPoint;
this.model = model;
}
_fetch(stream = false) {
@@ -64,7 +77,7 @@ class Chat {
"Content-Type": "application/json",
},
body: JSON.stringify({
model: "gpt-3.5-turbo",
model: this.model,
messages: [
{ role: "system", content: this.sysMessageContent },
...this.messages,
@@ -76,7 +89,11 @@ class Chat {
async fetch(): Promise<FetchResponse> {
const resp = await this._fetch();
return await resp.json();
const j = await resp.json();
if (j.error !== undefined) {
throw JSON.stringify(j.error);
}
return j;
}
async say(content: string): Promise<string> {
@@ -86,6 +103,9 @@ class Chat {
}
processFetchResponse(resp: FetchResponse): string {
if (resp.error !== undefined) {
throw JSON.stringify(resp.error);
}
this.total_tokens = resp?.usage?.total_tokens ?? 0;
if (resp?.choices[0]?.message) {
this.messages.push(resp?.choices[0]?.message);
@@ -114,13 +134,8 @@ class Chat {
return this._fetch(true);
}
// https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
calculate_token_length(content: string): number {
const totalCount = content.length;
const chineseCount = content.match(/[\u00ff-\uffff]|\S+/g)?.length ?? 0;
const englishCount = totalCount - chineseCount;
const tokenLength = englishCount / 4 + (chineseCount * 4) / 3;
return ~~tokenLength;
return calculate_token_length(content);
}
user(...messages: string[]) {

View File

@@ -2,6 +2,12 @@
@tailwind components;
@tailwind utilities;
html,
body,
#app {
height: 100%;
}
/* Hide scrollbar for webkit based browsers */
::-webkit-scrollbar {
display: none;

View File

@@ -1,4 +1,5 @@
import { ChatStore } from "./app";
import { calculate_token_length } from "./chatgpt";
interface Props {
messageIndex: number;
@@ -14,39 +15,57 @@ export default function Message(props: Props) {
chat.role === "user" ? "left-0" : "right-0"
}`}
onClick={() => {
if (
confirm(
`Are you sure to delete this message?\n${chat.content.slice(
0,
39
)}...`
)
) {
chatStore.history.splice(messageIndex, 1);
chatStore.postBeginIndex = Math.max(chatStore.postBeginIndex - 1, 0);
setChatStore({ ...chatStore });
chatStore.history[messageIndex].hide =
!chatStore.history[messageIndex].hide;
//chatStore.totalTokens =
chatStore.totalTokens = 0;
for (const i of chatStore.history
.filter(({ hide }) => !hide)
.slice(chatStore.postBeginIndex)
.map(({ token }) => token)) {
chatStore.totalTokens += i;
}
setChatStore({ ...chatStore });
}}
>
🗑
</button>
);
return (
<div
className={`flex ${
chat.role === "assistant" ? "justify-start" : "justify-end"
}`}
>
<>
{chatStore.postBeginIndex !== 0 &&
!chatStore.history[messageIndex].hide &&
chatStore.postBeginIndex ===
chatStore.history.slice(0, messageIndex).filter(({ hide }) => !hide)
.length && (
<div className="flex items-center relative justify-center">
<hr className="w-full h-px my-4 border-0 bg-slate-800 dark:bg-white" />
<span className="absolute px-3 bg-slate-800 text-white rounded p-1 dark:bg-white dark:text-black">
Above messages are "forgotten"
</span>
</div>
)}
<div
className={`relative w-fit p-2 rounded my-2 ${
chat.role === "assistant"
? "bg-white dark:bg-gray-700 dark:text-white"
: "bg-green-400"
className={`flex ${
chat.role === "assistant" ? "justify-start" : "justify-end"
}`}
>
<p className="message-content">{chat.content}</p>
<DeleteIcon />
<div
className={`relative w-fit p-2 rounded my-2 ${
chat.role === "assistant"
? "bg-white dark:bg-gray-700 dark:text-white"
: "bg-green-400"
} ${chat.hide ? "opacity-50" : ""}`}
>
<p className="message-content">
{chat.hide
? chat.content.split("\n")[0].slice(0, 16) + "... (deleted)"
: chat.content}
</p>
<DeleteIcon />
</div>
</div>
</div>
</>
);
}

56
src/models.ts Normal file
View File

@@ -0,0 +1,56 @@
interface Model {
maxToken: number;
price: {
prompt: number;
completion: number;
};
}
const models: Record<string, Model> = {
"gpt-3.5-turbo": {
maxToken: 4096,
price: { prompt: 0.0015 / 1000, completion: 0.002 / 1000 },
},
"gpt-3.5-turbo-16k": {
maxToken: 16384,
price: { prompt: 0.0003 / 1000, completion: 0.004 / 1000 },
},
"gpt-3.5-turbo-0613": {
maxToken: 4096,
price: { prompt: 0.0015 / 1000, completion: 0.002 / 1000 },
},
"gpt-3.5-turbo-16k-0613": {
maxToken: 16384,
price: { prompt: 0.0003 / 1000, completion: 0.004 / 1000 },
},
"gpt-3.5-turbo-0301": {
maxToken: 4096,
price: { prompt: 0.0015 / 1000, completion: 0.002 / 1000 },
},
"gpt-4": {
maxToken: 8192,
price: { prompt: 0.03 / 1000, completion: 0.06 / 1000 },
},
"gpt-4-0613": {
maxToken: 8192,
price: { prompt: 0.03 / 1000, completion: 0.06 / 1000 },
},
"gpt-4-32k": {
maxToken: 8192,
price: { prompt: 0.06 / 1000, completion: 0.12 / 1000 },
},
"gpt-4-32k-0613": {
maxToken: 8192,
price: { prompt: 0.06 / 1000, completion: 0.12 / 1000 },
},
"gpt-4-0314": {
maxToken: 8192,
price: { prompt: 0.03 / 1000, completion: 0.06 / 1000 },
},
"gpt-4-32k-0314": {
maxToken: 8192,
price: { prompt: 0.06 / 1000, completion: 0.12 / 1000 },
},
};
export default models;

View File

@@ -1,5 +1,7 @@
import { createRef } from "preact";
import { StateUpdater } from "preact/hooks";
import { ChatStore } from "./app";
import models from "./models";
const Help = (props: { children: any; help: string }) => {
return (
@@ -17,6 +19,32 @@ const Help = (props: { children: any; help: string }) => {
);
};
const SelectModel = (props: {
chatStore: ChatStore;
setChatStore: (cs: ChatStore) => void;
help: string;
}) => {
return (
<Help help={props.help}>
<label className="m-2 p-2">Model</label>
<select
className="m-2 p-2"
value={props.chatStore.model}
onChange={(event: any) => {
const model = event.target.value as string;
props.chatStore.model = model;
props.chatStore.maxTokens = models[model].maxToken;
props.setChatStore({ ...props.chatStore });
}}
>
{Object.keys(models).map((opt) => (
<option value={opt}>{opt}</option>
))}
</select>
</Help>
);
};
const Input = (props: {
chatStore: ChatStore;
setChatStore: (cs: ChatStore) => void;
@@ -90,6 +118,7 @@ export default (props: {
setChatStore: (cs: ChatStore) => void;
show: boolean;
setShow: StateUpdater<boolean>;
selectedChatStoreIndex: number;
}) => {
if (!props.show) return <div></div>;
const link =
@@ -101,12 +130,19 @@ export default (props: {
props.chatStore.apiKey
)}&api=${encodeURIComponent(props.chatStore.apiEndpoint)}&mode=${
props.chatStore.streamMode ? "stream" : "fetch"
}&sys=${encodeURIComponent(props.chatStore.systemMessageContent)}`;
}&model=${props.chatStore.model}&sys=${encodeURIComponent(
props.chatStore.systemMessageContent
)}`;
const importFileRef = createRef();
return (
<div className="left-0 top-0 overflow-scroll flex justify-center absolute w-screen h-screen bg-black bg-opacity-50 z-10">
<div className="left-0 top-0 overflow-scroll flex justify-center absolute w-screen h-full bg-black bg-opacity-50 z-10">
<div className="m-2 p-2 bg-white rounded-lg h-fit">
<h3 className="text-xl">Settings</h3>
<hr />
<p className="m-2 p-2">
Total cost in this section ${props.chatStore.cost.toFixed(4)}
</p>
<div className="box">
<Input
field="systemMessageContent"
@@ -128,9 +164,13 @@ export default (props: {
help="流模式,使用 stream mode 将可以动态看到生成内容,但无法准确计算 token 数量,在 token 数量过多时可能会裁切过多或过少历史消息"
{...props}
/>
<SelectModel
help="模型,默认 3.5。不同模型性能和定价也不同,请参考 API 文档。GPT-4 模型处于内测阶段,需要向 OPENAI 申请, 请确保您有访问权限)"
{...props}
/>
<Number
field="maxTokens"
help="最大 token 数量,这个详情参考 OPENAI API 文档"
help="最大 token 数量。如果使用非gpt-3.5模型请手动修改上限。gpt-4 & gpt-4-0314: 8192。gpt-4-32k & gpt-4-32k-0314: 32768"
readOnly={false}
{...props}
/>
@@ -152,6 +192,81 @@ export default (props: {
readOnly={true}
{...props}
/>
<p className="flex justify-evenly">
<button
className="p-2 m-2 rounded bg-amber-500"
onClick={() => {
let dataStr =
"data:text/json;charset=utf-8," +
encodeURIComponent(
JSON.stringify(props.chatStore, null, "\t")
);
let downloadAnchorNode = document.createElement("a");
downloadAnchorNode.setAttribute("href", dataStr);
downloadAnchorNode.setAttribute(
"download",
`chatgpt-api-web-${props.selectedChatStoreIndex}.json`
);
document.body.appendChild(downloadAnchorNode); // required for firefox
downloadAnchorNode.click();
downloadAnchorNode.remove();
}}
>
Export
</button>
<button
className="p-2 m-2 rounded bg-amber-500"
onClick={() => {
if (
!confirm(
"This will OVERWRITE the current chat history! Continue?"
)
)
return;
console.log("importFileRef", importFileRef);
importFileRef.current.click();
}}
>
Import
</button>
<input
className="hidden"
ref={importFileRef}
type="file"
onChange={() => {
const file = importFileRef.current.files[0];
console.log("file to import", file);
if (!file || file.type !== "application/json") {
alert("Please select a json file");
return;
}
const reader = new FileReader();
reader.onload = () => {
console.log("import content", reader.result);
if (!reader) {
alert("Empty file");
return;
}
try {
const newChatStore: ChatStore = JSON.parse(
reader.result as string
);
if (!newChatStore.chatgpt_api_web_version) {
throw "This is not an exported chatgpt-api-web chatstore file. The key 'chatgpt_api_web_version' is missing!";
}
props.setChatStore({ ...newChatStore });
} catch (e) {
alert(`Import error on parsing json: ${e}`);
}
};
reader.readAsText(file);
}}
/>
</p>
<p className="text-center m-2 p-2">
chatgpt-api-web ChatStore Version{" "}
{props.chatStore.chatgpt_api_web_version}
</p>
</div>
<hr />
<div className="flex justify-between">