update models

commit 0e5d29b5ed (parent 7413bf10ff)
Date: 2024-12-30 00:47:32 +08:00
3 changed files with 64 additions and 40 deletions

View File

@@ -49,10 +49,24 @@ interface LogprobsContent {
   logprob: number;
 }
 
+interface PromptTokensDetails {
+  cached_tokens: number;
+  audio_tokens: number;
+}
+
+interface CompletionTokensDetails {
+  reasoning_tokens: number;
+  audio_tokens: number;
+  accepted_prediction_tokens: number;
+  rejected_prediction_tokens: number;
+}
+
 export interface Usage {
   prompt_tokens: number;
   completion_tokens: number;
   total_tokens: number;
+  prompt_tokens_details: PromptTokensDetails | null;
+  completion_tokens_details: CompletionTokensDetails | null;
   response_model_name: string | null;
 }
@@ -96,11 +110,7 @@ export interface FetchResponse {
   object: string;
   created: number;
   model: string;
-  usage: {
-    prompt_tokens: number | undefined;
-    completion_tokens: number | undefined;
-    total_tokens: number | undefined;
-  };
+  usage: Usage;
   choices: {
     message: Message | undefined;
     finish_reason: "stop" | "length";

View File

@@ -219,6 +219,8 @@ export default function ChatBOX() {
       completion_tokens: responseTokenCount,
       total_tokens: prompt_tokens + responseTokenCount,
       response_model_name: response_model_name,
+      prompt_tokens_details: null,
+      completion_tokens_details: null,
     };
     if (usage) {
@@ -226,6 +228,8 @@ export default function ChatBOX() {
       ret.completion_tokens = usage.completion_tokens ?? responseTokenCount;
       ret.total_tokens =
         usage.total_tokens ?? prompt_tokens + responseTokenCount;
+      ret.prompt_tokens_details = usage.prompt_tokens_details ?? null;
+      ret.completion_tokens_details = usage.completion_tokens_details ?? null;
     }
     return ret;
@@ -265,6 +269,8 @@ export default function ChatBOX() {
       completion_tokens: data.usage.completion_tokens ?? 0,
       total_tokens: data.usage.total_tokens ?? 0,
       response_model_name: data.model ?? null,
+      prompt_tokens_details: data.usage.prompt_tokens_details ?? null,
+      completion_tokens_details: data.usage.completion_tokens_details ?? null,
     };
     return ret;
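
All three hunks apply the same fallback: keep the locally estimated token counts, prefer the server-reported usage field by field, and default the new detail objects to null when the API omits them. A standalone sketch of that pattern, using a hypothetical mergeUsage helper name that is not part of this commit, could look like this:

// Hypothetical helper illustrating the fallback pattern used above.
// ApiUsage stands in for a possibly partial usage object from the API.
type ApiUsage = Partial<Usage>;

function mergeUsage(
  estimated: { prompt_tokens: number; completion_tokens: number },
  api: ApiUsage | undefined,
  model: string | null
): Usage {
  return {
    prompt_tokens: api?.prompt_tokens ?? estimated.prompt_tokens,
    completion_tokens: api?.completion_tokens ?? estimated.completion_tokens,
    total_tokens:
      api?.total_tokens ??
      estimated.prompt_tokens + estimated.completion_tokens,
    prompt_tokens_details: api?.prompt_tokens_details ?? null,
    completion_tokens_details: api?.completion_tokens_details ?? null,
    response_model_name: api?.response_model_name ?? model,
  };
}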

View File

@@ -3,112 +3,120 @@ interface Model {
   price: {
     prompt: number;
     completion: number;
+    cached_prompt?: number;
   };
 }
+const M = 1000 * 1000; // dollars per million tokens
+const K = 1000; // dollars per thousand tokens
 export const models: Record<string, Model> = {
   "gpt-4o": {
     maxToken: 128000,
-    price: { prompt: 0.005 / 1000, completion: 0.015 / 1000 },
+    price: { prompt: 2.5 / M, cached_prompt: 1.25 / M, completion: 10 / M },
   },
   "gpt-4o-2024-11-20": {
     maxToken: 128000,
-    price: { prompt: 0.0025 / 1000, completion: 0.01 / 1000 },
+    price: { prompt: 2.5 / M, cached_prompt: 1.25 / M, completion: 10 / M },
   },
   "gpt-4o-2024-08-06": {
     maxToken: 128000,
-    price: { prompt: 0.0025 / 1000, completion: 0.01 / 1000 },
+    price: { prompt: 2.5 / M, cached_prompt: 1.25 / M, completion: 10 / M },
   },
   "gpt-4o-2024-05-13": {
     maxToken: 128000,
-    price: { prompt: 0.005 / 1000, completion: 0.015 / 1000 },
+    price: { prompt: 5 / M, completion: 15 / M },
   },
   "gpt-4o-mini": {
     maxToken: 128000,
-    price: { prompt: 0.15 / 1000 / 1000, completion: 0.6 / 1000 / 1000 },
+    price: { prompt: 0.15 / M, cached_prompt: 0.075 / M, completion: 0.6 / M },
   },
   "gpt-4o-mini-2024-07-18": {
     maxToken: 128000,
-    price: { prompt: 0.15 / 1000 / 1000, completion: 0.6 / 1000 / 1000 },
+    price: { prompt: 0.15 / M, cached_prompt: 0.075 / M, completion: 0.6 / M },
   },
+  o1: {
+    maxToken: 128000,
+    price: { prompt: 15 / M, cached_prompt: 7.5 / M, completion: 60 / M },
+  },
+  "o1-2024-12-17": {
+    maxToken: 128000,
+    price: { prompt: 15 / M, cached_prompt: 7.5 / M, completion: 60 / M },
+  },
"o1-preview": { "o1-preview": {
maxToken: 128000, maxToken: 128000,
price: { prompt: 15 / 1000 / 1000, completion: 60 / 1000 / 1000 }, price: { prompt: 15 / M, cached_prompt: 7.5 / M, completion: 60 },
}, },
"o1-preview-2024-09-12": { "o1-preview-2024-09-12": {
maxToken: 128000, maxToken: 128000,
price: { prompt: 15 / 1000 / 1000, completion: 60 / 1000 / 1000 }, price: { prompt: 15 / M, cached_prompt: 7.5 / M, completion: 60 },
}, },
"o1-mini": { "o1-mini": {
maxToken: 128000, maxToken: 128000,
price: { prompt: 3 / 1000 / 1000, completion: 12 / 1000 / 1000 }, price: { prompt: 3 / M, cached_prompt: 1.5 / M, completion: 12 / M },
}, },
"o1-mini-2024-09-12": { "o1-mini-2024-09-12": {
maxToken: 128000, maxToken: 128000,
price: { prompt: 3 / 1000 / 1000, completion: 12 / 1000 / 1000 }, price: { prompt: 3 / M, cached_prompt: 1.5 / M, completion: 12 / M },
}, },
"chatgpt-4o-latest": { "chatgpt-4o-latest": {
maxToken: 128000, maxToken: 128000,
price: { prompt: 0.005 / 1000, completion: 0.015 / 1000 }, price: { prompt: 5 / M, completion: 15 / M },
}, },
"gpt-4-turbo": { "gpt-4-turbo": {
maxToken: 128000, maxToken: 128000,
price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 }, price: { prompt: 10 / M, completion: 30 / M },
}, },
"gpt-4-turbo-2024-04-09": { "gpt-4-turbo-2024-04-09": {
maxToken: 128000, maxToken: 128000,
price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 }, price: { prompt: 10 / M, completion: 30 / M },
}, },
"gpt-4": { "gpt-4": {
maxToken: 8192, maxToken: 8000,
price: { prompt: 0.03 / 1000, completion: 0.06 / 1000 }, price: { prompt: 30 / M, completion: 60 / M },
}, },
"gpt-4-32k": { "gpt-4-32k": {
maxToken: 8192, maxToken: 32000,
price: { prompt: 0.06 / 1000, completion: 0.12 / 1000 }, price: { prompt: 60 / M, completion: 120 / M },
}, },
"gpt-4-0125-preview": { "gpt-4-0125-preview": {
maxToken: 128000, maxToken: 128000,
price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 }, price: { prompt: 10 / M, completion: 30 / M },
}, },
"gpt-4-1106-preview": { "gpt-4-1106-preview": {
maxToken: 128000, maxToken: 128000,
price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 }, price: { prompt: 10 / M, completion: 30 / M },
}, },
"gpt-4-vision-preview": { "gpt-4-vision-preview": {
maxToken: 128000, maxToken: 128000,
price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 }, price: { prompt: 10 / M, completion: 30 / M },
}, },
"gpt-4-1106-vision-preview": { "gpt-4-1106-vision-preview": {
maxToken: 128000, maxToken: 128000,
price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 }, price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 },
}, },
"gpt-3.5-turbo": {
maxToken: 4096,
price: { prompt: 0.0015 / 1000, completion: 0.002 / 1000 },
},
"gpt-3.5-turbo-0125": { "gpt-3.5-turbo-0125": {
maxToken: 16385, maxToken: 16000,
price: { prompt: 0.0005 / 1000, completion: 0.0015 / 1000 }, price: { prompt: 0.5 / M, completion: 1.5 / M },
}, },
"gpt-3.5-turbo-instruct": { "gpt-3.5-turbo-instruct": {
maxToken: 16385, maxToken: 16000,
price: { prompt: 0.5 / 1000 / 1000, completion: 2 / 1000 / 1000 }, price: { prompt: 1.5 / M, completion: 2 / M },
}, },
"gpt-3.5-turbo-1106": { "gpt-3.5-turbo-1106": {
maxToken: 16385, maxToken: 16000,
price: { prompt: 0.001 / 1000, completion: 0.002 / 1000 }, price: { prompt: 1 / M, completion: 2 / M },
}, },
"gpt-3.5-turbo-0613": { "gpt-3.5-turbo-0613": {
maxToken: 16385, maxToken: 16000,
price: { prompt: 1.5 / 1000 / 1000, completion: 2 / 1000 / 1000 }, price: { prompt: 1.5 / M, completion: 2 / M },
}, },
"gpt-3.5-turbo-16k-0613": { "gpt-3.5-turbo-16k-0613": {
maxToken: 16385, maxToken: 16000,
price: { prompt: 0.003 / 1000, completion: 0.004 / 1000 }, price: { prompt: 3 / M, completion: 4 / M },
}, },
"gpt-3.5-turbo-0301": { "gpt-3.5-turbo-0301": {
maxToken: 16385, maxToken: 16385,
price: { prompt: 1.5 / 1000 / 1000, completion: 2 / 1000 / 1000 }, price: { prompt: 1.5 / M, completion: 2 / M },
}, },
}; };
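
Taken together with the Usage changes in the first file, the new cached_prompt rate only matters if the cost calculation splits prompt tokens into cached and uncached parts. The repository's billing code is not shown in this commit; the sketch below is a hypothetical estimateCost helper illustrating how the new fields could be combined, assuming prices are stored as dollars per token (i.e. M = 1000 * 1000) as in the table above.

// Hypothetical cost estimator (not part of this commit).
// Assumes the `models` table and `Usage` interface defined above.
function estimateCost(modelName: string, usage: Usage): number {
  const model = models[modelName];
  if (!model) return 0;

  const cached = usage.prompt_tokens_details?.cached_tokens ?? 0;
  const uncached = usage.prompt_tokens - cached;

  // Fall back to the normal prompt rate when no cached rate is defined.
  const cachedRate = model.price.cached_prompt ?? model.price.prompt;

  return (
    uncached * model.price.prompt +
    cached * cachedRate +
    usage.completion_tokens * model.price.completion
  );
}

// Example: 1,000,000 prompt tokens on "gpt-4o", half of them cached,
// costs 0.5 * 2.5 + 0.5 * 1.25 = $1.875 before completion tokens.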