refac: models newChatStore
@@ -1,6 +1,5 @@
 import { Logprobs, Message } from "@/chatgpt";
-import models, { defaultModel } from "@/models";
-import CHATGPT_API_WEB_VERSION from "@/CHATGPT_API_WEB_VERSION";
+import { DefaultModel, CHATGPT_API_WEB_VERSION } from "@/const";
 import getDefaultParams from "@/utils/getDefaultParam";
 
 /**
@@ -73,68 +72,3 @@ export interface ChatStoreMessage extends Message {
   audio: Blob | null;
   logprobs: Logprobs | null;
 }
-
-const _defaultAPIEndpoint = "https://api.openai.com/v1/chat/completions";
-export const newChatStore = (
-  apiKey = "",
-  systemMessageContent = "",
-  apiEndpoint = _defaultAPIEndpoint,
-  streamMode = true,
-  model = defaultModel,
-  temperature = 0.7,
-  dev = false,
-  whisper_api = "https://api.openai.com/v1/audio/transcriptions",
-  whisper_key = "",
-  tts_api = "https://api.openai.com/v1/audio/speech",
-  tts_key = "",
-  tts_speed = 1.0,
-  tts_speed_enabled = false,
-  tts_format = "mp3",
-  toolsString = "",
-  image_gen_api = "https://api.openai.com/v1/images/generations",
-  image_gen_key = "",
-  json_mode = false,
-  logprobs = false,
-): ChatStore => {
-  return {
-    chatgpt_api_web_version: CHATGPT_API_WEB_VERSION,
-    systemMessageContent: getDefaultParams("sys", systemMessageContent),
-    toolsString,
-    history: [],
-    postBeginIndex: 0,
-    tokenMargin: 1024,
-    totalTokens: 0,
-    maxTokens: getDefaultParams(
-      "max",
-      models[getDefaultParams("model", model)]?.maxToken ?? 2048,
-    ),
-    maxGenTokens: 2048,
-    maxGenTokens_enabled: false,
-    apiKey: getDefaultParams("key", apiKey),
-    apiEndpoint: getDefaultParams("api", apiEndpoint),
-    streamMode: getDefaultParams("mode", streamMode),
-    model: getDefaultParams("model", model),
-    responseModelName: "",
-    cost: 0,
-    temperature: getDefaultParams("temp", temperature),
-    temperature_enabled: true,
-    top_p: 1,
-    top_p_enabled: false,
-    presence_penalty: 0,
-    frequency_penalty: 0,
-    develop_mode: getDefaultParams("dev", dev),
-    whisper_api: getDefaultParams("whisper-api", whisper_api),
-    whisper_key: getDefaultParams("whisper-key", whisper_key),
-    tts_api: getDefaultParams("tts-api", tts_api),
-    tts_key: getDefaultParams("tts-key", tts_key),
-    tts_voice: "alloy",
-    tts_speed: tts_speed,
-    tts_speed_enabled: tts_speed_enabled,
-    image_gen_api: image_gen_api,
-    image_gen_key: image_gen_key,
-    json_mode: json_mode,
-    tts_format: tts_format,
-    logprobs,
-    contents_for_index: [],
-  };
-};

82 src/types/models.ts Normal file
@@ -0,0 +1,82 @@
+interface Model {
+  maxToken: number;
+  price: {
+    prompt: number;
+    completion: number;
+  };
+}
+
+export const models: Record<string, Model> = {
+  "gpt-4o": {
+    maxToken: 128000,
+    price: { prompt: 0.005 / 1000, completion: 0.015 / 1000 },
+  },
+  "gpt-4o-2024-08-06": {
+    maxToken: 128000,
+    price: { prompt: 0.0025 / 1000, completion: 0.01 / 1000 },
+  },
+  "gpt-4o-2024-05-13": {
+    maxToken: 128000,
+    price: { prompt: 0.005 / 1000, completion: 0.015 / 1000 },
+  },
+  "gpt-4o-mini": {
+    maxToken: 128000,
+    price: { prompt: 0.15 / 1000 / 1000, completion: 0.6 / 1000 / 1000 },
+  },
+  "gpt-4o-mini-2024-07-18": {
+    maxToken: 128000,
+    price: { prompt: 0.15 / 1000 / 1000, completion: 0.6 / 1000 / 1000 },
+  },
+  "gpt-3.5-turbo-0125": {
+    maxToken: 16385,
+    price: { prompt: 0.0005 / 1000, completion: 0.0015 / 1000 },
+  },
+  "gpt-3.5-turbo-1106": {
+    maxToken: 16385,
+    price: { prompt: 0.001 / 1000, completion: 0.002 / 1000 },
+  },
+  "gpt-3.5-turbo": {
+    maxToken: 4096,
+    price: { prompt: 0.0015 / 1000, completion: 0.002 / 1000 },
+  },
+  "gpt-3.5-turbo-16k": {
+    maxToken: 16385,
+    price: { prompt: 0.003 / 1000, completion: 0.004 / 1000 },
+  },
+  "gpt-4-turbo": {
+    maxToken: 128000,
+    price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 },
+  },
+  "gpt-4-turbo-2024-04-09": {
+    maxToken: 128000,
+    price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 },
+  },
+  "gpt-4-turbo-preview": {
+    maxToken: 128000,
+    price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 },
+  },
+  "gpt-4-0125-preview": {
+    maxToken: 128000,
+    price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 },
+  },
+  "gpt-4-1106-preview": {
+    maxToken: 128000,
+    price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 },
+  },
+  "gpt-4-vision-preview": {
+    maxToken: 128000,
+    price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 },
+  },
+  "gpt-4-1106-vision-preview": {
+    maxToken: 128000,
+    price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 },
+  },
+  "gpt-4": {
+    maxToken: 8192,
+    price: { prompt: 0.03 / 1000, completion: 0.06 / 1000 },
+  },
+  "gpt-4-32k": {
+    maxToken: 8192,
+    price: { prompt: 0.06 / 1000, completion: 0.12 / 1000 },
+  },
+};
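
Usage sketch (not part of this commit): the per-token prices in the models table can be combined with prompt and completion token counts to estimate the dollar cost of a request. The estimateCost helper below is hypothetical; it only assumes the models record exported from src/types/models.ts above.

import { models } from "@/types/models";

// Hypothetical helper: estimate the dollar cost of one request from the
// per-token prices in the models table above.
function estimateCost(
  model: string,
  promptTokens: number,
  completionTokens: number,
): number {
  const price = models[model]?.price;
  if (!price) return 0; // unknown model: no pricing data available
  return promptTokens * price.prompt + completionTokens * price.completion;
}

// Example: 1000 prompt tokens + 500 completion tokens on gpt-4o-mini
// => 1000 * (0.15 / 1e6) + 500 * (0.6 / 1e6) = $0.00045
console.log(estimateCost("gpt-4o-mini", 1000, 500));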
72 src/types/newChatstore.ts Normal file
@@ -0,0 +1,72 @@
+import {
+  DefaultAPIEndpoint,
+  DefaultModel,
+  CHATGPT_API_WEB_VERSION,
+} from "@/const";
+import getDefaultParams from "@/utils/getDefaultParam";
+import { ChatStore } from "@/types/chatstore";
+import { models } from "@/types/models";
+
+export const newChatStore = (
+  apiKey = "",
+  systemMessageContent = "",
+  apiEndpoint = DefaultAPIEndpoint,
+  streamMode = true,
+  model = DefaultModel,
+  temperature = 0.7,
+  dev = false,
+  whisper_api = "https://api.openai.com/v1/audio/transcriptions",
+  whisper_key = "",
+  tts_api = "https://api.openai.com/v1/audio/speech",
+  tts_key = "",
+  tts_speed = 1.0,
+  tts_speed_enabled = false,
+  tts_format = "mp3",
+  toolsString = "",
+  image_gen_api = "https://api.openai.com/v1/images/generations",
+  image_gen_key = "",
+  json_mode = false,
+  logprobs = false,
+): ChatStore => {
+  return {
+    chatgpt_api_web_version: CHATGPT_API_WEB_VERSION,
+    systemMessageContent: getDefaultParams("sys", systemMessageContent),
+    toolsString,
+    history: [],
+    postBeginIndex: 0,
+    tokenMargin: 1024,
+    totalTokens: 0,
+    maxTokens: getDefaultParams(
+      "max",
+      models[getDefaultParams("model", model)]?.maxToken ?? 2048,
+    ),
+    maxGenTokens: 2048,
+    maxGenTokens_enabled: false,
+    apiKey: getDefaultParams("key", apiKey),
+    apiEndpoint: getDefaultParams("api", apiEndpoint),
+    streamMode: getDefaultParams("mode", streamMode),
+    model: getDefaultParams("model", model),
+    responseModelName: "",
+    cost: 0,
+    temperature: getDefaultParams("temp", temperature),
+    temperature_enabled: true,
+    top_p: 1,
+    top_p_enabled: false,
+    presence_penalty: 0,
+    frequency_penalty: 0,
+    develop_mode: getDefaultParams("dev", dev),
+    whisper_api: getDefaultParams("whisper-api", whisper_api),
+    whisper_key: getDefaultParams("whisper-key", whisper_key),
+    tts_api: getDefaultParams("tts-api", tts_api),
+    tts_key: getDefaultParams("tts-key", tts_key),
+    tts_voice: "alloy",
+    tts_speed: tts_speed,
+    tts_speed_enabled: tts_speed_enabled,
+    image_gen_api: image_gen_api,
+    image_gen_key: image_gen_key,
+    json_mode: json_mode,
+    tts_format: tts_format,
+    logprobs,
+    contents_for_index: [],
+  };
+};
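
Usage sketch (not part of this commit), assuming consumers import the factory from src/types/newChatstore.ts shown above. All parameters are optional and positional, so overriding a later option requires passing the earlier ones as well.

import { newChatStore } from "@/types/newChatstore";

// Default store: empty API key, default endpoint and model, streaming on.
const store = newChatStore();

// Override the first five positional parameters (apiKey, systemMessageContent,
// apiEndpoint, streamMode, model); everything after them keeps its default.
const customStore = newChatStore(
  "sk-...", // apiKey (placeholder)
  "You are a helpful assistant.", // systemMessageContent
  "https://api.openai.com/v1/chat/completions", // apiEndpoint
  false, // streamMode disabled
  "gpt-4o-mini", // model
);

console.log(store.apiEndpoint, customStore.model, customStore.maxTokens);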