diff --git a/src/app.tsx b/src/app.tsx
index 73e015e..d12c633 100644
--- a/src/app.tsx
+++ b/src/app.tsx
@@ -29,6 +29,8 @@ export interface ChatStore {
   tokenMargin: number;
   totalTokens: number;
   maxTokens: number;
+  maxGenTokens: number;
+  maxGenTokens_enabled: boolean;
   apiKey: string;
   apiEndpoint: string;
   streamMode: boolean;
@@ -75,6 +77,8 @@ export const newChatStore = (
   tokenMargin: 1024,
   totalTokens: 0,
   maxTokens: models[getDefaultParams("model", model)]?.maxToken ?? 4096,
+  maxGenTokens: 2048,
+  maxGenTokens_enabled: true,
   apiKey: getDefaultParams("key", apiKey),
   apiEndpoint: getDefaultParams("api", apiEndpoint),
   streamMode: getDefaultParams("mode", streamMode),
@@ -165,6 +169,8 @@ export function App() {
     const ret: ChatStore = await (await db).get(STORAGE_NAME, index);
     if (ret === null || ret === undefined) return newChatStore();
     // handle read from old version chatstore
+    if (ret.maxGenTokens === undefined) ret.maxGenTokens = 2048;
+    if (ret.maxGenTokens_enabled === undefined) ret.maxGenTokens_enabled = true;
     if (ret.model === undefined) ret.model = "gpt-3.5-turbo";
     if (ret.responseModelName === undefined) ret.responseModelName = "";
     if (ret.chatgpt_api_web_version === undefined)
diff --git a/src/chatbox.tsx b/src/chatbox.tsx
index d523f08..63b8469 100644
--- a/src/chatbox.tsx
+++ b/src/chatbox.tsx
@@ -184,6 +184,8 @@ export default function ChatBOX(props: {
     });
     client.model = chatStore.model;
     client.max_tokens = chatStore.maxTokens;
+    client.max_gen_tokens = chatStore.maxGenTokens;
+    client.enable_max_gen_tokens = chatStore.maxGenTokens_enabled;
 
     try {
       setShowGenerating(true);
diff --git a/src/chatgpt.ts b/src/chatgpt.ts
index e96cd03..0646c4b 100644
--- a/src/chatgpt.ts
+++ b/src/chatgpt.ts
@@ -63,6 +63,8 @@ class Chat {
   sysMessageContent: string;
   total_tokens: number;
   max_tokens: number;
+  max_gen_tokens: number;
+  enable_max_gen_tokens: boolean;
   tokens_margin: number;
   apiEndpoint: string;
   model: string;
@@ -78,6 +80,8 @@
     {
       systemMessage = "",
      max_tokens = 4096,
+      max_gen_tokens = 2048,
+      enable_max_gen_tokens = true,
       tokens_margin = 1024,
       apiEndPoint = "https://api.openai.com/v1/chat/completions",
       model = "gpt-3.5-turbo",
@@ -96,6 +100,8 @@
     this.messages = [];
     this.total_tokens = calculate_token_length(systemMessage);
     this.max_tokens = max_tokens;
+    this.max_gen_tokens = max_gen_tokens;
+    this.enable_max_gen_tokens = enable_max_gen_tokens;
     this.tokens_margin = tokens_margin;
     this.sysMessageContent = systemMessage;
     this.apiEndpoint = apiEndPoint;
@@ -151,6 +157,9 @@
     if (this.enable_top_p) {
       body["top_p"] = this.top_p;
     }
+    if (this.enable_max_gen_tokens) {
+      body["max_tokens"] = this.max_gen_tokens;
+    }
 
     return fetch(this.apiEndpoint, {
       method: "POST",
diff --git a/src/settings.tsx b/src/settings.tsx
index c96d824..1aa901a 100644
--- a/src/settings.tsx
+++ b/src/settings.tsx
@@ -188,6 +188,7 @@ const Number = (props: {
   field:
     | "totalTokens"
     | "maxTokens"
+    | "maxGenTokens"
     | "tokenMargin"
     | "postBeginIndex"
     | "presence_penalty"
@@ -197,9 +198,27 @@
 }) => {
   return (
-    <label>
+    <label>
+      {props.field === "maxGenTokens" && (
+        <input
+          type="checkbox"
+          checked={props.chatStore.maxGenTokens_enabled}
+          onClick={() => {
+            const newChatStore = { ...props.chatStore };
+            newChatStore.maxGenTokens_enabled =
+              !newChatStore.maxGenTokens_enabled;
+            props.setChatStore({ ...newChatStore });
+          }}
+        />
+      )}
+    </label>
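
For reviewers, the behavioral core of this change is the hunk in `src/chatgpt.ts`: the OpenAI-style `max_tokens` request field, which caps the number of *generated* tokens, is only included in the request body when the user has enabled the new setting. Below is a minimal sketch of that logic; `GenOptions` and `buildRequestBody` are hypothetical names for illustration, since the real code lives inside the `Chat` class's fetch path.

```ts
// Hypothetical standalone sketch of the body-building logic this patch adds;
// in the real code this happens inside Chat before fetch() is called.
interface GenOptions {
  model: string;
  max_gen_tokens: number; // generation cap configured in settings (default 2048)
  enable_max_gen_tokens: boolean; // the new settings.tsx checkbox state
}

type Message = { role: "system" | "user" | "assistant"; content: string };

function buildRequestBody(
  opts: GenOptions,
  messages: Message[]
): Record<string, unknown> {
  const body: Record<string, unknown> = { model: opts.model, messages };
  // Gate the field on the flag: when disabled, the key is absent from the
  // payload and the endpoint falls back to its own default generation limit.
  if (opts.enable_max_gen_tokens) {
    body["max_tokens"] = opts.max_gen_tokens;
  }
  return body;
}

// With the checkbox off, the payload carries no "max_tokens" key at all.
console.log(
  buildRequestBody(
    { model: "gpt-3.5-turbo", max_gen_tokens: 2048, enable_max_gen_tokens: false },
    [{ role: "user", content: "hello" }]
  )
);
```

Gating the assignment rather than sending a sentinel value (such as `0` or `null`) means disabled users keep whatever default the endpoint applies, which is the safer behavior across OpenAI-compatible servers.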