From ee9da49f704452e8e8b72d5ea66b7bf52e5f5e48 Mon Sep 17 00:00:00 2001
From: heimoshuiyu
Date: Tue, 15 Oct 2024 10:34:35 +0800
Subject: [PATCH] refac: models newChatStore

---
 src/CHATGPT_API_WEB_VERSION.ts |  3 --
 src/app.tsx                    |  7 ++--
 src/chatbox.tsx                |  2 +-
 src/chatgpt.ts                 |  4 +-
 src/const.ts                   |  3 ++
 src/settings.tsx               |  2 +-
 src/types/chatstore.ts         | 68 +-------------------------------
 src/{ => types}/models.ts      |  6 +--
 src/types/newChatstore.ts      | 72 ++++++++++++++++++++++++++++++++++
 9 files changed, 85 insertions(+), 82 deletions(-)
 delete mode 100644 src/CHATGPT_API_WEB_VERSION.ts
 create mode 100644 src/const.ts
 rename src/{ => types}/models.ts (95%)
 create mode 100644 src/types/newChatstore.ts

diff --git a/src/CHATGPT_API_WEB_VERSION.ts b/src/CHATGPT_API_WEB_VERSION.ts
deleted file mode 100644
index 274d891..0000000
--- a/src/CHATGPT_API_WEB_VERSION.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-const CHATGPT_API_WEB_VERSION = "v2.1.0";
-
-export default CHATGPT_API_WEB_VERSION;
diff --git a/src/app.tsx b/src/app.tsx
index 9d8c6dc..cd818d6 100644
--- a/src/app.tsx
+++ b/src/app.tsx
@@ -5,9 +5,10 @@ import "@/global.css";
 import { calculate_token_length } from "@/chatgpt";
 import getDefaultParams from "@/utils/getDefaultParam";
 import ChatBOX from "@/chatbox";
-import models, { defaultModel } from "@/models";
+import { DefaultModel } from "@/const";
 import { Tr, langCodeContext, LANG_OPTIONS } from "@/translate";
-import { ChatStore, newChatStore } from "@/types/chatstore";
+import { ChatStore } from "@/types/chatstore";
+import { newChatStore } from "@/types/newChatstore";
 
 export const STORAGE_NAME = "chatgpt-api-web";
 const STORAGE_NAME_SELECTED = `${STORAGE_NAME}-selected`;
@@ -145,7 +146,7 @@ export function App() {
     // handle read from old version chatstore
     if (ret.maxGenTokens === undefined) ret.maxGenTokens = 2048;
     if (ret.maxGenTokens_enabled === undefined) ret.maxGenTokens_enabled = true;
-    if (ret.model === undefined) ret.model = defaultModel;
+    if (ret.model === undefined) ret.model = DefaultModel;
     if (ret.responseModelName === undefined) ret.responseModelName = "";
     if (ret.toolsString === undefined) ret.toolsString = "";
     if (ret.chatgpt_api_web_version === undefined)
diff --git a/src/chatbox.tsx b/src/chatbox.tsx
index b0e9aca..683a966 100644
--- a/src/chatbox.tsx
+++ b/src/chatbox.tsx
@@ -41,7 +41,7 @@ import {
   TemplateTools,
 } from "./types/chatstore";
 import Message from "@/message";
-import models from "@/models";
+import { models } from "@/types/models";
 import Settings from "@/settings";
 import getDefaultParams from "@/utils/getDefaultParam";
 import { AddImage } from "@/addImage";
diff --git a/src/chatgpt.ts b/src/chatgpt.ts
index 7154e89..6c31e3f 100644
--- a/src/chatgpt.ts
+++ b/src/chatgpt.ts
@@ -1,4 +1,4 @@
-import { defaultModel } from "@/models";
+import { DefaultModel } from "@/const";
 
 export interface ImageURL {
   url: string;
@@ -157,7 +157,7 @@ class Chat {
     enable_max_gen_tokens = true,
     tokens_margin = 1024,
     apiEndPoint = "https://api.openai.com/v1/chat/completions",
-    model = defaultModel,
+    model = DefaultModel,
     temperature = 0.7,
     enable_temperature = true,
     top_p = 1,
diff --git a/src/const.ts b/src/const.ts
new file mode 100644
index 0000000..14c7bd7
--- /dev/null
+++ b/src/const.ts
@@ -0,0 +1,3 @@
+export const DefaultAPIEndpoint = "https://api.openai.com/v1/chat/completions";
+export const CHATGPT_API_WEB_VERSION = "v2.1.0";
+export const DefaultModel = "gpt-4o-mini";
diff --git a/src/settings.tsx b/src/settings.tsx
index ce120be..0a36503 100644
--- a/src/settings.tsx
+++ b/src/settings.tsx
@@ -28,7 +28,7 @@ import {
   TemplateAPI,
   TemplateTools,
 } from "@/types/chatstore";
-import models from "@/models";
+import { models } from "@/types/models";
 import { tr, Tr, langCodeContext, LANG_OPTIONS } from "@/translate";
 import { isVailedJSON } from "@/message";
 import { SetAPIsTemplate } from "@/setAPIsTemplate";
diff --git a/src/types/chatstore.ts b/src/types/chatstore.ts
index 9486b8b..88dc539 100644
--- a/src/types/chatstore.ts
+++ b/src/types/chatstore.ts
@@ -1,6 +1,5 @@
 import { Logprobs, Message } from "@/chatgpt";
-import models, { defaultModel } from "@/models";
-import CHATGPT_API_WEB_VERSION from "@/CHATGPT_API_WEB_VERSION";
+import { DefaultModel, CHATGPT_API_WEB_VERSION } from "@/const";
 import getDefaultParams from "@/utils/getDefaultParam";
 
 /**
@@ -73,68 +72,3 @@ export interface ChatStoreMessage extends Message {
   audio: Blob | null;
   logprobs: Logprobs | null;
 }
-
-const _defaultAPIEndpoint = "https://api.openai.com/v1/chat/completions";
-export const newChatStore = (
-  apiKey = "",
-  systemMessageContent = "",
-  apiEndpoint = _defaultAPIEndpoint,
-  streamMode = true,
-  model = defaultModel,
-  temperature = 0.7,
-  dev = false,
-  whisper_api = "https://api.openai.com/v1/audio/transcriptions",
-  whisper_key = "",
-  tts_api = "https://api.openai.com/v1/audio/speech",
-  tts_key = "",
-  tts_speed = 1.0,
-  tts_speed_enabled = false,
-  tts_format = "mp3",
-  toolsString = "",
-  image_gen_api = "https://api.openai.com/v1/images/generations",
-  image_gen_key = "",
-  json_mode = false,
-  logprobs = false,
-): ChatStore => {
-  return {
-    chatgpt_api_web_version: CHATGPT_API_WEB_VERSION,
-    systemMessageContent: getDefaultParams("sys", systemMessageContent),
-    toolsString,
-    history: [],
-    postBeginIndex: 0,
-    tokenMargin: 1024,
-    totalTokens: 0,
-    maxTokens: getDefaultParams(
-      "max",
-      models[getDefaultParams("model", model)]?.maxToken ?? 2048,
-    ),
-    maxGenTokens: 2048,
-    maxGenTokens_enabled: false,
-    apiKey: getDefaultParams("key", apiKey),
-    apiEndpoint: getDefaultParams("api", apiEndpoint),
-    streamMode: getDefaultParams("mode", streamMode),
-    model: getDefaultParams("model", model),
-    responseModelName: "",
-    cost: 0,
-    temperature: getDefaultParams("temp", temperature),
-    temperature_enabled: true,
-    top_p: 1,
-    top_p_enabled: false,
-    presence_penalty: 0,
-    frequency_penalty: 0,
-    develop_mode: getDefaultParams("dev", dev),
-    whisper_api: getDefaultParams("whisper-api", whisper_api),
-    whisper_key: getDefaultParams("whisper-key", whisper_key),
-    tts_api: getDefaultParams("tts-api", tts_api),
-    tts_key: getDefaultParams("tts-key", tts_key),
-    tts_voice: "alloy",
-    tts_speed: tts_speed,
-    tts_speed_enabled: tts_speed_enabled,
-    image_gen_api: image_gen_api,
-    image_gen_key: image_gen_key,
-    json_mode: json_mode,
-    tts_format: tts_format,
-    logprobs,
-    contents_for_index: [],
-  };
-};
diff --git a/src/models.ts b/src/types/models.ts
similarity index 95%
rename from src/models.ts
rename to src/types/models.ts
index 3534184..6f57b1a 100644
--- a/src/models.ts
+++ b/src/types/models.ts
@@ -6,7 +6,7 @@ interface Model {
   };
 }
 
-const models: Record<string, Model> = {
+export const models: Record<string, Model> = {
   "gpt-4o": {
     maxToken: 128000,
     price: { prompt: 0.005 / 1000, completion: 0.015 / 1000 },
@@ -80,7 +80,3 @@ const models: Record<string, Model> = {
     price: { prompt: 0.06 / 1000, completion: 0.12 / 1000 },
   },
 };
-
-export const defaultModel = "gpt-4o-mini";
-
-export default models;
diff --git a/src/types/newChatstore.ts b/src/types/newChatstore.ts
new file mode 100644
index 0000000..658a47d
--- /dev/null
+++ b/src/types/newChatstore.ts
@@ -0,0 +1,72 @@
+import {
+  DefaultAPIEndpoint,
+  DefaultModel,
+  CHATGPT_API_WEB_VERSION,
+} from "@/const";
+import getDefaultParams from "@/utils/getDefaultParam";
+import { ChatStore } from "@/types/chatstore";
+import { models } from "@/types/models";
+
+export const newChatStore = (
+  apiKey = "",
+  systemMessageContent = "",
+  apiEndpoint = DefaultAPIEndpoint,
+  streamMode = true,
+  model = DefaultModel,
+  temperature = 0.7,
+  dev = false,
+  whisper_api = "https://api.openai.com/v1/audio/transcriptions",
+  whisper_key = "",
+  tts_api = "https://api.openai.com/v1/audio/speech",
+  tts_key = "",
+  tts_speed = 1.0,
+  tts_speed_enabled = false,
+  tts_format = "mp3",
+  toolsString = "",
+  image_gen_api = "https://api.openai.com/v1/images/generations",
+  image_gen_key = "",
+  json_mode = false,
+  logprobs = false,
+): ChatStore => {
+  return {
+    chatgpt_api_web_version: CHATGPT_API_WEB_VERSION,
+    systemMessageContent: getDefaultParams("sys", systemMessageContent),
+    toolsString,
+    history: [],
+    postBeginIndex: 0,
+    tokenMargin: 1024,
+    totalTokens: 0,
+    maxTokens: getDefaultParams(
+      "max",
+      models[getDefaultParams("model", model)]?.maxToken ?? 2048,
+    ),
+    maxGenTokens: 2048,
+    maxGenTokens_enabled: false,
+    apiKey: getDefaultParams("key", apiKey),
+    apiEndpoint: getDefaultParams("api", apiEndpoint),
+    streamMode: getDefaultParams("mode", streamMode),
+    model: getDefaultParams("model", model),
+    responseModelName: "",
+    cost: 0,
+    temperature: getDefaultParams("temp", temperature),
+    temperature_enabled: true,
+    top_p: 1,
+    top_p_enabled: false,
+    presence_penalty: 0,
+    frequency_penalty: 0,
+    develop_mode: getDefaultParams("dev", dev),
+    whisper_api: getDefaultParams("whisper-api", whisper_api),
+    whisper_key: getDefaultParams("whisper-key", whisper_key),
+    tts_api: getDefaultParams("tts-api", tts_api),
+    tts_key: getDefaultParams("tts-key", tts_key),
+    tts_voice: "alloy",
+    tts_speed: tts_speed,
+    tts_speed_enabled: tts_speed_enabled,
+    image_gen_api: image_gen_api,
+    image_gen_key: image_gen_key,
+    json_mode: json_mode,
+    tts_format: tts_format,
+    logprobs,
+    contents_for_index: [],
+  };
+};
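After this refactor the default model, endpoint, and version string live in src/const.ts, the model table becomes a named export of src/types/models.ts, and newChatStore moves to src/types/newChatstore.ts. A minimal usage sketch of a call site under those assumptions (illustrative only, not part of the patch; the local variable name is made up):

import { newChatStore } from "@/types/newChatstore";
import { models } from "@/types/models";
import { DefaultModel } from "@/const";

// Build a ChatStore with the built-in defaults. Every field is still routed
// through getDefaultParams inside newChatStore, so externally supplied
// defaults (if any) take precedence over the values passed here.
const store = newChatStore();

// maxTokens is looked up from the model table and falls back to 2048
// when the selected model has no entry.
console.log(store.model, store.maxTokens, models[DefaultModel]?.maxToken);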