refac: models newChatStore
@@ -1,3 +0,0 @@
-const CHATGPT_API_WEB_VERSION = "v2.1.0";
-
-export default CHATGPT_API_WEB_VERSION;
@@ -5,9 +5,10 @@ import "@/global.css";
 import { calculate_token_length } from "@/chatgpt";
 import getDefaultParams from "@/utils/getDefaultParam";
 import ChatBOX from "@/chatbox";
-import models, { defaultModel } from "@/models";
+import { DefaultModel } from "@/const";
 import { Tr, langCodeContext, LANG_OPTIONS } from "@/translate";
-import { ChatStore, newChatStore } from "@/types/chatstore";
+import { ChatStore } from "@/types/chatstore";
+import { newChatStore } from "@/types/newChatstore";
 
 export const STORAGE_NAME = "chatgpt-api-web";
 const STORAGE_NAME_SELECTED = `${STORAGE_NAME}-selected`;
@@ -145,7 +146,7 @@ export function App() {
   // handle read from old version chatstore
   if (ret.maxGenTokens === undefined) ret.maxGenTokens = 2048;
   if (ret.maxGenTokens_enabled === undefined) ret.maxGenTokens_enabled = true;
-  if (ret.model === undefined) ret.model = defaultModel;
+  if (ret.model === undefined) ret.model = DefaultModel;
   if (ret.responseModelName === undefined) ret.responseModelName = "";
   if (ret.toolsString === undefined) ret.toolsString = "";
   if (ret.chatgpt_api_web_version === undefined)
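The backfill above runs when a ChatStore persisted by an older version is loaded: each field introduced since that version gets a default, with the model now falling back to DefaultModel from "@/const". As a hedged sketch, the same checks could live in a standalone helper; the name upgradeChatStore and the Partial typing below are illustrative and not part of this commit.

// Hypothetical helper mirroring the in-place checks in App(); not in the commit.
import { ChatStore } from "@/types/chatstore";
import { DefaultModel } from "@/const";

function upgradeChatStore(ret: Partial<ChatStore>): ChatStore {
  // Fields added after older versions persisted their stores receive defaults.
  if (ret.maxGenTokens === undefined) ret.maxGenTokens = 2048;
  if (ret.maxGenTokens_enabled === undefined) ret.maxGenTokens_enabled = true;
  if (ret.model === undefined) ret.model = DefaultModel;
  if (ret.responseModelName === undefined) ret.responseModelName = "";
  if (ret.toolsString === undefined) ret.toolsString = "";
  return ret as ChatStore; // assumes the remaining fields were already present
}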
@@ -41,7 +41,7 @@ import {
   TemplateTools,
 } from "./types/chatstore";
 import Message from "@/message";
-import models from "@/models";
+import { models } from "@/types/models";
 import Settings from "@/settings";
 import getDefaultParams from "@/utils/getDefaultParam";
 import { AddImage } from "@/addImage";
@@ -1,4 +1,4 @@
-import { defaultModel } from "@/models";
+import { DefaultModel } from "@/const";
 
 export interface ImageURL {
   url: string;
||||||
@@ -157,7 +157,7 @@ class Chat {
|
|||||||
enable_max_gen_tokens = true,
|
enable_max_gen_tokens = true,
|
||||||
tokens_margin = 1024,
|
tokens_margin = 1024,
|
||||||
apiEndPoint = "https://api.openai.com/v1/chat/completions",
|
apiEndPoint = "https://api.openai.com/v1/chat/completions",
|
||||||
model = defaultModel,
|
model = DefaultModel,
|
||||||
temperature = 0.7,
|
temperature = 0.7,
|
||||||
enable_temperature = true,
|
enable_temperature = true,
|
||||||
top_p = 1,
|
top_p = 1,
|
||||||
|
|||||||
src/const.ts (new file, 3 lines)
@@ -0,0 +1,3 @@
+export const DefaultAPIEndpoint = "https://api.openai.com/v1/chat/completions";
+export const CHATGPT_API_WEB_VERSION = "v2.1.0";
+export const DefaultModel = "gpt-4o-mini";
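These three constants replace the deleted CHATGPT_API_WEB_VERSION default-export module, the defaultModel export from "@/models", and the repeated endpoint literal. A minimal sketch of the new import style; the logging lines are illustrative only and not part of the commit.

// Sketch: named imports from the new central constants module.
import {
  DefaultAPIEndpoint,
  DefaultModel,
  CHATGPT_API_WEB_VERSION,
} from "@/const";

// Illustrative only: report the build version and fallback endpoint/model.
console.log(`chatgpt-api-web ${CHATGPT_API_WEB_VERSION}`);
console.log(`endpoint: ${DefaultAPIEndpoint}, model: ${DefaultModel}`);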
@@ -28,7 +28,7 @@ import {
   TemplateAPI,
   TemplateTools,
 } from "@/types/chatstore";
-import models from "@/models";
+import { models } from "@/types/models";
 import { tr, Tr, langCodeContext, LANG_OPTIONS } from "@/translate";
 import { isVailedJSON } from "@/message";
 import { SetAPIsTemplate } from "@/setAPIsTemplate";
@@ -1,6 +1,5 @@
 import { Logprobs, Message } from "@/chatgpt";
-import models, { defaultModel } from "@/models";
-import CHATGPT_API_WEB_VERSION from "@/CHATGPT_API_WEB_VERSION";
+import { DefaultModel, CHATGPT_API_WEB_VERSION } from "@/const";
 import getDefaultParams from "@/utils/getDefaultParam";
 
 /**
@@ -73,68 +72,3 @@ export interface ChatStoreMessage extends Message {
   audio: Blob | null;
   logprobs: Logprobs | null;
 }
-
-const _defaultAPIEndpoint = "https://api.openai.com/v1/chat/completions";
-export const newChatStore = (
-  apiKey = "",
-  systemMessageContent = "",
-  apiEndpoint = _defaultAPIEndpoint,
-  streamMode = true,
-  model = defaultModel,
-  temperature = 0.7,
-  dev = false,
-  whisper_api = "https://api.openai.com/v1/audio/transcriptions",
-  whisper_key = "",
-  tts_api = "https://api.openai.com/v1/audio/speech",
-  tts_key = "",
-  tts_speed = 1.0,
-  tts_speed_enabled = false,
-  tts_format = "mp3",
-  toolsString = "",
-  image_gen_api = "https://api.openai.com/v1/images/generations",
-  image_gen_key = "",
-  json_mode = false,
-  logprobs = false,
-): ChatStore => {
-  return {
-    chatgpt_api_web_version: CHATGPT_API_WEB_VERSION,
-    systemMessageContent: getDefaultParams("sys", systemMessageContent),
-    toolsString,
-    history: [],
-    postBeginIndex: 0,
-    tokenMargin: 1024,
-    totalTokens: 0,
-    maxTokens: getDefaultParams(
-      "max",
-      models[getDefaultParams("model", model)]?.maxToken ?? 2048,
-    ),
-    maxGenTokens: 2048,
-    maxGenTokens_enabled: false,
-    apiKey: getDefaultParams("key", apiKey),
-    apiEndpoint: getDefaultParams("api", apiEndpoint),
-    streamMode: getDefaultParams("mode", streamMode),
-    model: getDefaultParams("model", model),
-    responseModelName: "",
-    cost: 0,
-    temperature: getDefaultParams("temp", temperature),
-    temperature_enabled: true,
-    top_p: 1,
-    top_p_enabled: false,
-    presence_penalty: 0,
-    frequency_penalty: 0,
-    develop_mode: getDefaultParams("dev", dev),
-    whisper_api: getDefaultParams("whisper-api", whisper_api),
-    whisper_key: getDefaultParams("whisper-key", whisper_key),
-    tts_api: getDefaultParams("tts-api", tts_api),
-    tts_key: getDefaultParams("tts-key", tts_key),
-    tts_voice: "alloy",
-    tts_speed: tts_speed,
-    tts_speed_enabled: tts_speed_enabled,
-    image_gen_api: image_gen_api,
-    image_gen_key: image_gen_key,
-    json_mode: json_mode,
-    tts_format: tts_format,
-    logprobs,
-    contents_for_index: [],
-  };
-};
@@ -6,7 +6,7 @@ interface Model {
   };
 }
 
-const models: Record<string, Model> = {
+export const models: Record<string, Model> = {
   "gpt-4o": {
     maxToken: 128000,
     price: { prompt: 0.005 / 1000, completion: 0.015 / 1000 },
@@ -80,7 +80,3 @@ const models: Record<string, Model> = {
     price: { prompt: 0.06 / 1000, completion: 0.12 / 1000 },
   },
 };
-
-export const defaultModel = "gpt-4o-mini";
-
-export default models;
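Call sites now use the named export instead of the removed default export and defaultModel. A short sketch of the new usage; the contextBudget helper is illustrative and not part of the commit.

// Sketch: consuming the named `models` export after the refactor.
import { models } from "@/types/models";
import { DefaultModel } from "@/const";

// Hypothetical helper: context window for a model, with the same 2048
// fallback that newChatStore uses when the model name is unknown.
function contextBudget(model: string = DefaultModel): number {
  return models[model]?.maxToken ?? 2048;
}

console.log(contextBudget("gpt-4o")); // 128000, per the table in this file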
src/types/newChatstore.ts (new file, 72 lines)
@@ -0,0 +1,72 @@
+import {
+  DefaultAPIEndpoint,
+  DefaultModel,
+  CHATGPT_API_WEB_VERSION,
+} from "@/const";
+import getDefaultParams from "@/utils/getDefaultParam";
+import { ChatStore } from "@/types/chatstore";
+import { models } from "@/types/models";
+
+export const newChatStore = (
+  apiKey = "",
+  systemMessageContent = "",
+  apiEndpoint = DefaultAPIEndpoint,
+  streamMode = true,
+  model = DefaultModel,
+  temperature = 0.7,
+  dev = false,
+  whisper_api = "https://api.openai.com/v1/audio/transcriptions",
+  whisper_key = "",
+  tts_api = "https://api.openai.com/v1/audio/speech",
+  tts_key = "",
+  tts_speed = 1.0,
+  tts_speed_enabled = false,
+  tts_format = "mp3",
+  toolsString = "",
+  image_gen_api = "https://api.openai.com/v1/images/generations",
+  image_gen_key = "",
+  json_mode = false,
+  logprobs = false,
+): ChatStore => {
+  return {
+    chatgpt_api_web_version: CHATGPT_API_WEB_VERSION,
+    systemMessageContent: getDefaultParams("sys", systemMessageContent),
+    toolsString,
+    history: [],
+    postBeginIndex: 0,
+    tokenMargin: 1024,
+    totalTokens: 0,
+    maxTokens: getDefaultParams(
+      "max",
+      models[getDefaultParams("model", model)]?.maxToken ?? 2048,
+    ),
+    maxGenTokens: 2048,
+    maxGenTokens_enabled: false,
+    apiKey: getDefaultParams("key", apiKey),
+    apiEndpoint: getDefaultParams("api", apiEndpoint),
+    streamMode: getDefaultParams("mode", streamMode),
+    model: getDefaultParams("model", model),
+    responseModelName: "",
+    cost: 0,
+    temperature: getDefaultParams("temp", temperature),
+    temperature_enabled: true,
+    top_p: 1,
+    top_p_enabled: false,
+    presence_penalty: 0,
+    frequency_penalty: 0,
+    develop_mode: getDefaultParams("dev", dev),
+    whisper_api: getDefaultParams("whisper-api", whisper_api),
+    whisper_key: getDefaultParams("whisper-key", whisper_key),
+    tts_api: getDefaultParams("tts-api", tts_api),
+    tts_key: getDefaultParams("tts-key", tts_key),
+    tts_voice: "alloy",
+    tts_speed: tts_speed,
+    tts_speed_enabled: tts_speed_enabled,
+    image_gen_api: image_gen_api,
+    image_gen_key: image_gen_key,
+    json_mode: json_mode,
+    tts_format: tts_format,
+    logprobs,
+    contents_for_index: [],
+  };
+};
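A rough sketch of calling the relocated factory; the placeholder key is an assumption, and whether the defaults survive depends on what getDefaultParams resolves at runtime.

// Sketch: every parameter is positional with a default, so a fresh store
// only needs the leading values you want to override.
import { newChatStore } from "@/types/newChatstore";
import { DefaultModel, DefaultAPIEndpoint } from "@/const";

const store = newChatStore("sk-placeholder-key"); // assumption: example key only

// Unless getDefaultParams supplies an override, the defaults flow through:
console.log(store.model === DefaultModel); // true
console.log(store.apiEndpoint === DefaultAPIEndpoint); // true
console.log(store.maxTokens); // the model's maxToken, or 2048 if unknown

Because the signature is positional, overriding a later argument (say toolsString) means passing every argument before it; centralizing the pass-through defaults in "@/const" at least keeps those values consistent across callers.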