add param max_gen_tokens

2023-11-08 16:16:15 +08:00
parent 9142665585
commit ed090136ac
4 changed files with 43 additions and 1 deletion

@@ -29,6 +29,8 @@ export interface ChatStore {
   tokenMargin: number;
   totalTokens: number;
   maxTokens: number;
+  maxGenTokens: number;
+  maxGenTokens_enabled: boolean;
   apiKey: string;
   apiEndpoint: string;
   streamMode: boolean;
@@ -75,6 +77,8 @@ export const newChatStore = (
   tokenMargin: 1024,
   totalTokens: 0,
   maxTokens: models[getDefaultParams("model", model)]?.maxToken ?? 4096,
+  maxGenTokens: 2048,
+  maxGenTokens_enabled: true,
   apiKey: getDefaultParams("key", apiKey),
   apiEndpoint: getDefaultParams("api", apiEndpoint),
   streamMode: getDefaultParams("mode", streamMode),
@@ -165,6 +169,8 @@ export function App() {
     const ret: ChatStore = await (await db).get(STORAGE_NAME, index);
     if (ret === null || ret === undefined) return newChatStore();
     // handle read from old version chatstore
+    if (ret.maxGenTokens === undefined) ret.maxGenTokens = 2048;
+    if (ret.maxGenTokens_enabled === undefined) ret.maxGenTokens_enabled = true;
     if (ret.model === undefined) ret.model = "gpt-3.5-turbo";
     if (ret.responseModelName === undefined) ret.responseModelName = "";
     if (ret.chatgpt_api_web_version === undefined)
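The backfill defaults match the newChatStore values above (2048, enabled), so chats persisted by older versions behave the same as fresh ones after migration. A minimal sketch of the effect on read, using a hypothetical pre-commit store (illustrative, not from the diff):

// Hypothetical: a ChatStore persisted before this commit lacks the new fields.
const old = { model: "gpt-3.5-turbo" } as ChatStore;
// After the undefined-guards above run:
//   old.maxGenTokens === 2048
//   old.maxGenTokens_enabled === true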

@@ -184,6 +184,8 @@ export default function ChatBOX(props: {
     });
     client.model = chatStore.model;
     client.max_tokens = chatStore.maxTokens;
+    client.max_gen_tokens = chatStore.maxGenTokens;
+    client.enable_max_gen_tokens = chatStore.maxGenTokens_enabled;
     try {
       setShowGenerating(true);

@@ -63,6 +63,8 @@ class Chat {
   sysMessageContent: string;
   total_tokens: number;
   max_tokens: number;
+  max_gen_tokens: number;
+  enable_max_gen_tokens: boolean;
   tokens_margin: number;
   apiEndpoint: string;
   model: string;
@@ -78,6 +80,8 @@ class Chat {
     {
       systemMessage = "",
       max_tokens = 4096,
+      max_gen_tokens = 2048,
+      enable_max_gen_tokens = true,
       tokens_margin = 1024,
       apiEndPoint = "https://api.openai.com/v1/chat/completions",
       model = "gpt-3.5-turbo",
@@ -96,6 +100,8 @@ class Chat {
     this.messages = [];
     this.total_tokens = calculate_token_length(systemMessage);
     this.max_tokens = max_tokens;
+    this.max_gen_tokens = max_gen_tokens;
+    this.enable_max_gen_tokens = enable_max_gen_tokens;
     this.tokens_margin = tokens_margin;
     this.sysMessageContent = systemMessage;
     this.apiEndpoint = apiEndPoint;
@@ -151,6 +157,9 @@ class Chat {
     if (this.enable_top_p) {
       body["top_p"] = this.top_p;
     }
+    if (this.enable_max_gen_tokens) {
+      body["max_tokens"] = this.max_gen_tokens;
+    }
     return fetch(this.apiEndpoint, {
       method: "POST",

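Naming note: the client's max_tokens (copied from chatStore.maxTokens) is the context-window budget used for history trimming, while max_gen_tokens caps only the generated reply and is what goes over the wire under the OpenAI API's max_tokens key. A minimal sketch of the resulting POST body when the toggle is on (the message content is a placeholder, not from the diff):

// Sketch: request body with enable_max_gen_tokens = true and max_gen_tokens = 2048.
{
  "model": "gpt-3.5-turbo",
  "messages": [{ "role": "user", "content": "hello" }],
  "max_tokens": 2048
}
// With the toggle off, "max_tokens" is omitted and the API's own default cap applies.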
@@ -188,6 +188,7 @@ const Number = (props: {
   field:
     | "totalTokens"
     | "maxTokens"
+    | "maxGenTokens"
     | "tokenMargin"
     | "postBeginIndex"
     | "presence_penalty"
@@ -197,9 +198,27 @@ const Number = (props: {
 }) => {
   return (
     <Help help={props.help}>
-      <label className="m-2 p-2">{props.field}</label>
+      <span>
+        <label className="m-2 p-2">{props.field}</label>
+        {props.field === "maxGenTokens" && (
+          <input
+            type="checkbox"
+            checked={props.chatStore.maxGenTokens_enabled}
+            onChange={() => {
+              const newChatStore = { ...props.chatStore };
+              newChatStore.maxGenTokens_enabled =
+                !newChatStore.maxGenTokens_enabled;
+              props.setChatStore({ ...newChatStore });
+            }}
+          />
+        )}
+      </span>
       <input
         readOnly={props.readOnly}
+        disabled={
+          props.field === "maxGenTokens" &&
+          !props.chatStore.maxGenTokens_enabled
+        }
         type="number"
         className="m-2 p-2 border rounded focus w-28"
         value={props.chatStore[props.field]}
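Design note: when the checkbox is unchecked, the number input is disabled and, per the enable_max_gen_tokens guard in the Chat class, max_tokens is dropped from the request entirely rather than sent as 0. A rough sketch of the two states (illustrative only):

// [x] maxGenTokens [ 2048 ]  -> request carries "max_tokens": 2048
// [ ] maxGenTokens [ 2048 ]  -> input disabled; "max_tokens" omitted from the request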
@@ -384,6 +403,12 @@ export default (props: {
         readOnly={false}
         {...props}
       />
+      <Number
+        field="maxGenTokens"
+        help="Maximum number of tokens to generate"
+        readOnly={false}
+        {...props}
+      />
       <Number
         field="tokenMargin"
         help="When totalTokens > maxTokens - tokenMargin, history trimming is triggered; chatgpt will 'forget' some of the messages in the conversation, but all history messages are still kept locally"