Compare commits

..

7 Commits

Author SHA1 Message Date
626f406711 Revert "Deprecated max_token to max-completion_tokens"
All checks were successful
Build static content / build (push) Successful in 7m54s
This reverts commit 9dd4d99e54.
2024-12-08 17:04:28 +08:00
9dd4d99e54 Deprecated max_token to max-completion_tokens 2024-12-08 16:46:09 +08:00
5039bdfca8 temperature default to 1 2024-12-08 16:41:42 +08:00
64f1e3d70e update models 2024-12-08 16:41:02 +08:00
b98900873c more options on new chat store 2024-12-08 16:25:55 +08:00
400ebafc37 format code 2024-12-08 16:19:20 +08:00
e7c26560bb store response_model_name by message 2024-12-08 16:19:04 +08:00
10 changed files with 91 additions and 49 deletions

View File

@@ -55,7 +55,7 @@ ChatGPT API WEB 是为 ChatGPT 的日常用户和 Prompt 工程师设计的项
- `api`: API Endpoint 默认为 `https://api.openai.com/v1/chat/completions` - `api`: API Endpoint 默认为 `https://api.openai.com/v1/chat/completions`
- `mode`: `fetch` 或 `stream` 模式,stream 模式下可以动态看到 api 返回的数据,但无法得知 token 数量,只能进行估算,在 token 数量过多时可能会裁切过多或过少历史消息 - `mode`: `fetch` 或 `stream` 模式,stream 模式下可以动态看到 api 返回的数据,但无法得知 token 数量,只能进行估算,在 token 数量过多时可能会裁切过多或过少历史消息
- `dev`: true / false 开发模式,这个模式下可以看到并调整更多参数 - `dev`: true / false 开发模式,这个模式下可以看到并调整更多参数
- `temp`: 温度,默认 0.7 - `temp`: 温度,默认 1
- `whisper-api`: Whisper 语音转文字服务 API, 只有设置了此值后才会显示语音转文字按钮 - `whisper-api`: Whisper 语音转文字服务 API, 只有设置了此值后才会显示语音转文字按钮
- `whisper-key`: 用于 Whisper 服务的 key,如果留空则默认使用上方的 OPENAI API KEY - `whisper-key`: 用于 Whisper 服务的 key,如果留空则默认使用上方的 OPENAI API KEY

View File

@@ -263,6 +263,7 @@ export function AddImage({
example: false, example: false,
audio: null, audio: null,
logprobs: null, logprobs: null,
response_model_name: imageGenModel,
}); });
setChatStore({ ...chatStore }); setChatStore({ ...chatStore });

View File

@@ -158,7 +158,7 @@ class Chat {
tokens_margin = 1024, tokens_margin = 1024,
apiEndPoint = "https://api.openai.com/v1/chat/completions", apiEndPoint = "https://api.openai.com/v1/chat/completions",
model = DefaultModel, model = DefaultModel,
temperature = 0.7, temperature = 1,
enable_temperature = true, enable_temperature = true,
top_p = 1, top_p = 1,
enable_top_p = false, enable_top_p = false,

View File

@@ -181,6 +181,12 @@ export default function Message(props: Props) {
/> />
)} )}
<TTSPlay chat={chat} /> <TTSPlay chat={chat} />
{chat.response_model_name && (
<>
<span className="opacity-50">{chat.response_model_name}</span>
<hr />
</>
)}
</div> </div>
</div> </div>
{showEdit && ( {showEdit && (

View File

@@ -74,6 +74,7 @@ const AddToolMsg = (props: {
example: false, example: false,
audio: null, audio: null,
logprobs: null, logprobs: null,
response_model_name: null,
}); });
setChatStore({ ...chatStore }); setChatStore({ ...chatStore });
setNewToolCallID(""); setNewToolCallID("");

View File

@@ -34,7 +34,6 @@ export function App() {
if (ret.maxGenTokens === undefined) ret.maxGenTokens = 2048; if (ret.maxGenTokens === undefined) ret.maxGenTokens = 2048;
if (ret.maxGenTokens_enabled === undefined) ret.maxGenTokens_enabled = true; if (ret.maxGenTokens_enabled === undefined) ret.maxGenTokens_enabled = true;
if (ret.model === undefined) ret.model = DefaultModel; if (ret.model === undefined) ret.model = DefaultModel;
if (ret.responseModelName === undefined) ret.responseModelName = "";
if (ret.toolsString === undefined) ret.toolsString = ""; if (ret.toolsString === undefined) ret.toolsString = "";
if (ret.chatgpt_api_web_version === undefined) if (ret.chatgpt_api_web_version === undefined)
// this is from old version because it is undefined, // this is from old version because it is undefined,

View File

@@ -87,8 +87,9 @@ export default function ChatBOX(props: {
const logprobs: Logprobs = { const logprobs: Logprobs = {
content: [], content: [],
}; };
let response_model_name: string | null = null;
for await (const i of client.processStreamResponse(response)) { for await (const i of client.processStreamResponse(response)) {
chatStore.responseModelName = i.model; response_model_name = i.model;
responseTokenCount += 1; responseTokenCount += 1;
const c = i.choices[0]; const c = i.choices[0];
@@ -148,17 +149,17 @@ export default function ChatBOX(props: {
// estimate cost // estimate cost
let cost = 0; let cost = 0;
if (chatStore.responseModelName) { if (response_model_name) {
cost += cost +=
responseTokenCount * responseTokenCount *
(models[chatStore.responseModelName]?.price?.completion ?? 0); (models[response_model_name]?.price?.completion ?? 0);
let sum = 0; let sum = 0;
for (const msg of chatStore.history for (const msg of chatStore.history
.filter(({ hide }) => !hide) .filter(({ hide }) => !hide)
.slice(chatStore.postBeginIndex)) { .slice(chatStore.postBeginIndex)) {
sum += msg.token; sum += msg.token;
} }
cost += sum * (models[chatStore.responseModelName]?.price?.prompt ?? 0); cost += sum * (models[response_model_name]?.price?.prompt ?? 0);
} }
console.log("cost", cost); console.log("cost", cost);
@@ -174,6 +175,7 @@ export default function ChatBOX(props: {
example: false, example: false,
audio: null, audio: null,
logprobs, logprobs,
response_model_name,
}; };
if (allChunkTool.length > 0) newMsg.tool_calls = allChunkTool; if (allChunkTool.length > 0) newMsg.tool_calls = allChunkTool;
@@ -188,7 +190,6 @@ export default function ChatBOX(props: {
const _completeWithFetchMode = async (response: Response) => { const _completeWithFetchMode = async (response: Response) => {
const data = (await response.json()) as FetchResponse; const data = (await response.json()) as FetchResponse;
chatStore.responseModelName = data.model ?? "";
if (data.model) { if (data.model) {
let cost = 0; let cost = 0;
cost += cost +=
@@ -228,6 +229,7 @@ export default function ChatBOX(props: {
example: false, example: false,
audio: null, audio: null,
logprobs: data.choices[0]?.logprobs, logprobs: data.choices[0]?.logprobs,
response_model_name: data.model,
}); });
setShowGenerating(false); setShowGenerating(false);
}; };
@@ -311,7 +313,6 @@ export default function ChatBOX(props: {
console.log("empty message"); console.log("empty message");
return; return;
} }
if (call_complete) chatStore.responseModelName = "";
let content: string | MessageDetail[] = inputMsg; let content: string | MessageDetail[] = inputMsg;
if (images.length > 0) { if (images.length > 0) {
@@ -328,6 +329,7 @@ export default function ChatBOX(props: {
example: false, example: false,
audio: null, audio: null,
logprobs: null, logprobs: null,
response_model_name: null,
}); });
// manually calculate token length // manually calculate token length
@@ -619,11 +621,6 @@ export default function ChatBOX(props: {
)} )}
</p> </p>
<p className="p-2 my-2 text-center opacity-50 dark:text-white"> <p className="p-2 my-2 text-center opacity-50 dark:text-white">
{chatStore.responseModelName && (
<>
{Tr("Generated by")} {chatStore.responseModelName}
</>
)}
{chatStore.postBeginIndex !== 0 && ( {chatStore.postBeginIndex !== 0 && (
<> <>
<br /> <br />
@@ -754,6 +751,7 @@ export default function ChatBOX(props: {
example: false, example: false,
audio: null, audio: null,
logprobs: null, logprobs: null,
response_model_name: null,
}); });
setInputMsg(""); setInputMsg("");
setChatStore({ ...chatStore }); setChatStore({ ...chatStore });

View File

@@ -20,7 +20,6 @@ export interface ChatStore {
apiEndpoint: string; apiEndpoint: string;
streamMode: boolean; streamMode: boolean;
model: string; model: string;
responseModelName: string;
cost: number; cost: number;
temperature: number; temperature: number;
temperature_enabled: boolean; temperature_enabled: boolean;
@@ -69,4 +68,5 @@ export interface ChatStoreMessage extends Message {
example: boolean; example: boolean;
audio: Blob | null; audio: Blob | null;
logprobs: Logprobs | null; logprobs: Logprobs | null;
response_model_name: string | null;
} }

View File

@@ -11,6 +11,10 @@ export const models: Record<string, Model> = {
maxToken: 128000, maxToken: 128000,
price: { prompt: 0.005 / 1000, completion: 0.015 / 1000 }, price: { prompt: 0.005 / 1000, completion: 0.015 / 1000 },
}, },
"gpt-4o-2024-11-20": {
maxToken: 128000,
price: { prompt: 0.0025 / 1000, completion: 0.01 / 1000 },
},
"gpt-4o-2024-08-06": { "gpt-4o-2024-08-06": {
maxToken: 128000, maxToken: 128000,
price: { prompt: 0.0025 / 1000, completion: 0.01 / 1000 }, price: { prompt: 0.0025 / 1000, completion: 0.01 / 1000 },
@@ -27,21 +31,25 @@ export const models: Record<string, Model> = {
maxToken: 128000, maxToken: 128000,
price: { prompt: 0.15 / 1000 / 1000, completion: 0.6 / 1000 / 1000 }, price: { prompt: 0.15 / 1000 / 1000, completion: 0.6 / 1000 / 1000 },
}, },
"gpt-3.5-turbo-0125": { "o1-preview": {
maxToken: 16385, maxToken: 128000,
price: { prompt: 0.0005 / 1000, completion: 0.0015 / 1000 }, price: { prompt: 15 / 1000 / 1000, completion: 60 / 1000 / 1000 },
}, },
"gpt-3.5-turbo-1106": { "o1-preview-2024-09-12": {
maxToken: 16385, maxToken: 128000,
price: { prompt: 0.001 / 1000, completion: 0.002 / 1000 }, price: { prompt: 15 / 1000 / 1000, completion: 60 / 1000 / 1000 },
}, },
"gpt-3.5-turbo": { "o1-mini": {
maxToken: 4096, maxToken: 128000,
price: { prompt: 0.0015 / 1000, completion: 0.002 / 1000 }, price: { prompt: 3 / 1000 / 1000, completion: 12 / 1000 / 1000 },
}, },
"gpt-3.5-turbo-16k": { "o1-mini-2024-09-12": {
maxToken: 16385, maxToken: 128000,
price: { prompt: 0.003 / 1000, completion: 0.004 / 1000 }, price: { prompt: 3 / 1000 / 1000, completion: 12 / 1000 / 1000 },
},
"chatgpt-4o-latest": {
maxToken: 128000,
price: { prompt: 0.005 / 1000, completion: 0.015 / 1000 },
}, },
"gpt-4-turbo": { "gpt-4-turbo": {
maxToken: 128000, maxToken: 128000,
@@ -51,9 +59,13 @@ export const models: Record<string, Model> = {
maxToken: 128000, maxToken: 128000,
price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 }, price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 },
}, },
"gpt-4-turbo-preview": { "gpt-4": {
maxToken: 128000, maxToken: 8192,
price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 }, price: { prompt: 0.03 / 1000, completion: 0.06 / 1000 },
},
"gpt-4-32k": {
maxToken: 8192,
price: { prompt: 0.06 / 1000, completion: 0.12 / 1000 },
}, },
"gpt-4-0125-preview": { "gpt-4-0125-preview": {
maxToken: 128000, maxToken: 128000,
@@ -71,12 +83,32 @@ export const models: Record<string, Model> = {
maxToken: 128000, maxToken: 128000,
price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 }, price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 },
}, },
"gpt-4": { "gpt-3.5-turbo": {
maxToken: 8192, maxToken: 4096,
price: { prompt: 0.03 / 1000, completion: 0.06 / 1000 }, price: { prompt: 0.0015 / 1000, completion: 0.002 / 1000 },
}, },
"gpt-4-32k": { "gpt-3.5-turbo-0125": {
maxToken: 8192, maxToken: 16385,
price: { prompt: 0.06 / 1000, completion: 0.12 / 1000 }, price: { prompt: 0.0005 / 1000, completion: 0.0015 / 1000 },
},
"gpt-3.5-turbo-instruct": {
maxToken: 16385,
price: { prompt: 0.5 / 1000 / 1000, completion: 2 / 1000 / 1000 },
},
"gpt-3.5-turbo-1106": {
maxToken: 16385,
price: { prompt: 0.001 / 1000, completion: 0.002 / 1000 },
},
"gpt-3.5-turbo-0613": {
maxToken: 16385,
price: { prompt: 1.5 / 1000 / 1000, completion: 2 / 1000 / 1000 },
},
"gpt-3.5-turbo-16k-0613": {
maxToken: 16385,
price: { prompt: 0.003 / 1000, completion: 0.004 / 1000 },
},
"gpt-3.5-turbo-0301": {
maxToken: 16385,
price: { prompt: 1.5 / 1000 / 1000, completion: 2 / 1000 / 1000 },
}, },
}; };

View File

@@ -14,11 +14,17 @@ interface NewChatStoreOptions {
streamMode?: boolean; streamMode?: boolean;
model?: string; model?: string;
temperature?: number; temperature?: number;
temperature_enabled?: boolean;
top_p?: number;
top_p_enabled?: boolean;
presence_penalty?: number;
frequency_penalty?: number;
dev?: boolean; dev?: boolean;
whisper_api?: string; whisper_api?: string;
whisper_key?: string; whisper_key?: string;
tts_api?: string; tts_api?: string;
tts_key?: string; tts_key?: string;
tts_voice?: string;
tts_speed?: number; tts_speed?: number;
tts_speed_enabled?: boolean; tts_speed_enabled?: boolean;
tts_format?: string; tts_format?: string;
@@ -34,7 +40,7 @@ export const newChatStore = (options: NewChatStoreOptions): ChatStore => {
chatgpt_api_web_version: CHATGPT_API_WEB_VERSION, chatgpt_api_web_version: CHATGPT_API_WEB_VERSION,
systemMessageContent: getDefaultParams( systemMessageContent: getDefaultParams(
"sys", "sys",
options.systemMessageContent ?? "", options.systemMessageContent ?? ""
), ),
toolsString: options.toolsString ?? "", toolsString: options.toolsString ?? "",
history: [], history: [],
@@ -44,37 +50,36 @@ export const newChatStore = (options: NewChatStoreOptions): ChatStore => {
maxTokens: getDefaultParams( maxTokens: getDefaultParams(
"max", "max",
models[getDefaultParams("model", options.model ?? DefaultModel)] models[getDefaultParams("model", options.model ?? DefaultModel)]
?.maxToken ?? 2048, ?.maxToken ?? 2048
), ),
maxGenTokens: 2048, maxGenTokens: 2048,
maxGenTokens_enabled: false, maxGenTokens_enabled: false,
apiKey: getDefaultParams("key", options.apiKey ?? ""), apiKey: getDefaultParams("key", options.apiKey ?? ""),
apiEndpoint: getDefaultParams( apiEndpoint: getDefaultParams(
"api", "api",
options.apiEndpoint ?? DefaultAPIEndpoint, options.apiEndpoint ?? DefaultAPIEndpoint
), ),
streamMode: getDefaultParams("mode", options.streamMode ?? true), streamMode: getDefaultParams("mode", options.streamMode ?? true),
model: getDefaultParams("model", options.model ?? DefaultModel), model: getDefaultParams("model", options.model ?? DefaultModel),
responseModelName: "",
cost: 0, cost: 0,
temperature: getDefaultParams("temp", options.temperature ?? 0.7), temperature: getDefaultParams("temp", options.temperature ?? 1),
temperature_enabled: true, temperature_enabled: options.temperature_enabled ?? true,
top_p: 1, top_p: options.top_p ?? 1,
top_p_enabled: false, top_p_enabled: options.top_p_enabled ?? false,
presence_penalty: 0, presence_penalty: options.presence_penalty ?? 0,
frequency_penalty: 0, frequency_penalty: options.frequency_penalty ?? 0,
develop_mode: getDefaultParams("dev", options.dev ?? false), develop_mode: getDefaultParams("dev", options.dev ?? false),
whisper_api: getDefaultParams( whisper_api: getDefaultParams(
"whisper-api", "whisper-api",
options.whisper_api ?? "https://api.openai.com/v1/audio/transcriptions", options.whisper_api ?? "https://api.openai.com/v1/audio/transcriptions"
), ),
whisper_key: getDefaultParams("whisper-key", options.whisper_key ?? ""), whisper_key: getDefaultParams("whisper-key", options.whisper_key ?? ""),
tts_api: getDefaultParams( tts_api: getDefaultParams(
"tts-api", "tts-api",
options.tts_api ?? "https://api.openai.com/v1/audio/speech", options.tts_api ?? "https://api.openai.com/v1/audio/speech"
), ),
tts_key: getDefaultParams("tts-key", options.tts_key ?? ""), tts_key: getDefaultParams("tts-key", options.tts_key ?? ""),
tts_voice: "alloy", tts_voice: options.tts_voice ?? "alloy",
tts_speed: options.tts_speed ?? 1.0, tts_speed: options.tts_speed ?? 1.0,
tts_speed_enabled: options.tts_speed_enabled ?? false, tts_speed_enabled: options.tts_speed_enabled ?? false,
image_gen_api: image_gen_api: