fix typo and cached_input token
@@ -237,17 +237,6 @@ export default function ChatBOX() {
   const _completeWithFetchMode = async (response: Response): Promise<Usage> => {
     const data = (await response.json()) as FetchResponse;
-    if (data.model) {
-      let cost = 0;
-      cost +=
-        (data.usage.prompt_tokens ?? 0) *
-        (models[data.model]?.price?.prompt ?? 0);
-      cost +=
-        (data.usage.completion_tokens ?? 0) *
-        (models[data.model]?.price?.completion ?? 0);
-      chatStore.cost += cost;
-      addTotalCost(cost);
-    }
     const msg = client.processFetchResponse(data);

     chatStore.history.push({
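The fetch path no longer prices the response inline; judging from the signature, _completeWithFetchMode already returns a Usage, presumably so the shared costing block in the next hunk bills fetch-mode responses once instead of this path counting them separately. From the fields that block reads, the Usage shape is roughly the following (a sketch inferred from this diff, not the project's actual declaration):

    interface Usage {
      // model name reported by the API response
      response_model_name: string | null;
      prompt_tokens: number;
      completion_tokens: number;
      // present when the provider reports prompt-cache hits
      prompt_tokens_details?: {
        cached_tokens?: number;
      };
    }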
@@ -355,12 +344,30 @@ export default function ChatBOX() {
     // calculate cost
     if (usage.response_model_name) {
       let cost = 0;
-      cost +=
-        usage.prompt_tokens *
-        (models[usage.response_model_name]?.price?.prompt ?? 0);
+
+      if (usage.prompt_tokens_details) {
+        const cached_prompt_tokens =
+          usage.prompt_tokens_details.cached_tokens ?? 0;
+        const uncached_prompt_tokens =
+          usage.prompt_tokens - cached_prompt_tokens;
+        const prompt_price =
+          models[usage.response_model_name]?.price?.prompt ?? 0;
+        const cached_price =
+          models[usage.response_model_name]?.price?.cached_prompt ??
+          prompt_price;
+        cost +=
+          cached_prompt_tokens * cached_price +
+          uncached_prompt_tokens * prompt_price;
+      } else {
+        cost +=
+          usage.prompt_tokens *
+          (models[usage.response_model_name]?.price?.prompt ?? 0);
+      }
+
       cost +=
         usage.completion_tokens *
         (models[usage.response_model_name]?.price?.completion ?? 0);
+
       addTotalCost(cost);
       chatStore.cost += cost;
       console.log("cost", cost);
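Cached prompt tokens are now billed at cached_prompt when the model defines one, with a fallback to the full prompt price; the old flat calculation is kept for providers that omit prompt_tokens_details. A self-contained sketch of the arithmetic, using the corrected o1 prices from the hunks below (variable names are illustrative, not the component's):

    const M = 1000 * 1000;
    const price = { prompt: 15 / M, cached_prompt: 7.5 / M, completion: 60 / M };

    const prompt_tokens = 10_000;
    const cached_tokens = 6_000; // from prompt_tokens_details.cached_tokens
    const completion_tokens = 2_000;

    const cost =
      cached_tokens * price.cached_prompt +            // 6000 * $7.5/M = $0.045
      (prompt_tokens - cached_tokens) * price.prompt + // 4000 * $15/M  = $0.06
      completion_tokens * price.completion;            // 2000 * $60/M  = $0.12

    console.log(cost.toFixed(3)); // "0.225"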
@@ -7,7 +7,7 @@ interface Model {
   };
 }

-const M = 1000 / 1000; // dollars per million tokens
+const M = 1000 * 1000; // dollars per million tokens
 const K = 1000; // dollars per thousand tokens

 export const models: Record<string, Model> = {
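The old value made M equal to 1000 / 1000 = 1, so every price written as x / M came out a million times too large. With the fix, the constant matches its comment:

    const M = 1000 * 1000;
    const perToken = 15 / M; // $15 per million tokens -> $0.000015 per token
    console.log((1_000_000 * perToken).toFixed(2)); // "15.00" — 1M prompt tokens cost $15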
@@ -37,19 +37,19 @@ export const models: Record<string, Model> = {
   },
   o1: {
     maxToken: 128000,
-    price: { prompt: 15 / M, cached_prompt: 7.5 / M, completion: 60 },
+    price: { prompt: 15 / M, cached_prompt: 7.5 / M, completion: 60 / M },
   },
   "o1-2024-12-17": {
     maxToken: 128000,
-    price: { prompt: 15 / M, cached_prompt: 7.5 / M, completion: 60 },
+    price: { prompt: 15 / M, cached_prompt: 7.5 / M, completion: 60 / M },
   },
   "o1-preview": {
     maxToken: 128000,
-    price: { prompt: 15 / M, cached_prompt: 7.5 / M, completion: 60 },
+    price: { prompt: 15 / M, cached_prompt: 7.5 / M, completion: 60 / M },
   },
   "o1-preview-2024-09-12": {
     maxToken: 128000,
-    price: { prompt: 15 / M, cached_prompt: 7.5 / M, completion: 60 },
+    price: { prompt: 15 / M, cached_prompt: 7.5 / M, completion: 60 / M },
   },
   "o1-mini": {
     maxToken: 128000,
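Same class of fix across the o1 entries: completion: 60 is a per-token price of sixty dollars, where the intended price is $60 per million. A quick magnitude check (the token count is made up for illustration):

    const M = 1000 * 1000;
    const completion_tokens = 500;

    console.log(completion_tokens * 60);       // 30000 — $30,000 under the typo
    console.log((completion_tokens * 60) / M); // 0.03  — three cents after the fix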
@@ -93,7 +93,7 @@ export const models: Record<string, Model> = {
   },
   "gpt-4-1106-vision-preview": {
     maxToken: 128000,
-    price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 },
+    price: { prompt: 10 / M, completion: 30 / M },
   },
   "gpt-3.5-turbo-0125": {
     maxToken: 16000,
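The vision-preview entry is a notation change, not a price change: $0.01 per thousand tokens and $10 per million tokens are the same per-token rate, restated in the file's per-million convention:

    const M = 1000 * 1000;
    const before = 0.01 / 1000; // $0.01 per 1K tokens
    const after = 10 / M;       // $10 per 1M tokens
    console.log(Math.abs(before - after) < 1e-18); // true — both are $0.00001 per token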