4 Commits

Author SHA1 Message Date
7b00a6ea7f wenker custom params 2024-02-28 12:25:25 +08:00
74b60b4e95 fix: logprobs in vision model not permit 2024-02-24 09:47:44 +08:00
24aba9ae07 fix: old version logprobs to false 2024-02-23 19:46:09 +08:00
4b1f81f72b fix: build 2024-02-23 19:04:41 +08:00
4 changed files with 4 additions and 57 deletions

View File

@@ -67,7 +67,7 @@ export interface ChatStore {
   logprobs: boolean;
 }
-const _defaultAPIEndpoint = "https://api.openai.com/v1/chat/completions";
+const _defaultAPIEndpoint = "/v1/chat/completions";
 export const newChatStore = (
   apiKey = "",
   systemMessageContent = "",
@@ -217,7 +217,6 @@ export function App() {
     message.token = calculate_token_length(message.content);
   }
   if (ret.cost === undefined) ret.cost = 0;
-  if (ret.logprobs === undefined) ret.logprobs = true;
   return ret;
 };

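The ChatStore hunks do two things: the default API endpoint becomes a relative path ("/v1/chat/completions"), and the store-upgrade code stops forcing logprobs to true for chats saved by older versions (the "fix: old version logprobs to false" commit). A minimal sketch of that upgrade step; only the cost and logprobs handling comes from the diff, everything else is assumed for illustration:

// Sketch only: upgrade a persisted ChatStore-like object.
// Only the cost/logprobs defaults appear in the diff; the rest is assumed.
interface StoredChat {
  cost?: number;
  logprobs?: boolean;
}

const upgradeStore = (ret: StoredChat): StoredChat => {
  if (ret.cost === undefined) ret.cost = 0;
  // The old default of `logprobs = true` was removed, so chats from
  // older versions now effectively fall back to false.
  if (ret.logprobs === undefined) ret.logprobs = false;
  return ret;
};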
View File

@@ -219,7 +219,6 @@ class Chat {
       model: this.model,
       messages,
       stream,
-      logprobs,
       presence_penalty: this.presence_penalty,
       frequency_penalty: this.frequency_penalty,
     };
@@ -237,6 +236,9 @@ class Chat {
         type: "json_object",
       };
     }
+    if (logprobs) {
+      body["logprobs"] = true;
+    }
     // parse toolsString to function call format
     const ts = this.toolsString.trim();

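Taken together, the two Chat hunks stop sending logprobs unconditionally and only add it to the request body when it is enabled, which matches the "fix: logprobs in vision model not permit" commit: vision models reject the field, so omitting it by default avoids API errors. A minimal sketch of the pattern, assuming a simplified body shape (names other than those in the diff are illustrative):

// Sketch only: build a chat completion request body and attach
// logprobs conditionally, mirroring the diff above.
interface CompletionBody {
  model: string;
  messages: unknown[];
  stream: boolean;
  presence_penalty: number;
  frequency_penalty: number;
  logprobs?: boolean;
}

function buildBody(
  chat: { model: string; presence_penalty: number; frequency_penalty: number },
  messages: unknown[],
  stream: boolean,
  logprobs: boolean,
): CompletionBody {
  const body: CompletionBody = {
    model: chat.model,
    messages,
    stream,
    presence_penalty: chat.presence_penalty,
    frequency_penalty: chat.frequency_penalty,
  };
  // Only send logprobs when explicitly requested; vision models
  // do not accept the parameter.
  if (logprobs) {
    body["logprobs"] = true;
  }
  return body;
}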
View File

@@ -1,5 +1,3 @@
-import React from "react";
 const logprobToColor = (logprob: number) => {
   // convert the logprob to a percentage
   const percent = Math.exp(logprob) * 100;

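logprobToColor starts from Math.exp(logprob) * 100, which turns an OpenAI token log probability into a percentage: exp(0) = 1 is 100%, exp(-0.105) ≈ 0.90 is roughly 90%, exp(-2.3) ≈ 0.10 is roughly 10%. A small sketch of just that conversion; the color mapping itself is not visible in this diff:

// Sketch: convert a token logprob to a percentage. The real component
// goes on to map the percentage to a color, which the diff omits.
const logprobToPercent = (logprob: number): number => Math.exp(logprob) * 100;

logprobToPercent(0);      // 100
logprobToPercent(-0.105); // ≈ 90
logprobToPercent(-2.3);   // ≈ 10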
View File

@@ -15,62 +15,10 @@ const models: Record<string, Model> = {
     maxToken: 16385,
     price: { prompt: 0.001 / 1000, completion: 0.002 / 1000 },
   },
-  "gpt-3.5-turbo": {
-    maxToken: 4096,
-    price: { prompt: 0.0015 / 1000, completion: 0.002 / 1000 },
-  },
-  "gpt-3.5-turbo-16k": {
-    maxToken: 16385,
-    price: { prompt: 0.003 / 1000, completion: 0.004 / 1000 },
-  },
-  "gpt-3.5-turbo-0613": {
-    maxToken: 4096,
-    price: { prompt: 0.0015 / 1000, completion: 0.002 / 1000 },
-  },
-  "gpt-3.5-turbo-16k-0613": {
-    maxToken: 16385,
-    price: { prompt: 0.003 / 1000, completion: 0.004 / 1000 },
-  },
-  "gpt-3.5-turbo-0301": {
-    maxToken: 4096,
-    price: { prompt: 0.0015 / 1000, completion: 0.002 / 1000 },
-  },
   "gpt-4-0125-preview": {
     maxToken: 128000,
     price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 },
   },
-  "gpt-4-turbo-preview": {
-    maxToken: 128000,
-    price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 },
-  },
-  "gpt-4-1106-preview": {
-    maxToken: 128000,
-    price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 },
-  },
-  "gpt-4-vision-preview": {
-    maxToken: 128000,
-    price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 },
-  },
-  "gpt-4-1106-vision-preview": {
-    maxToken: 128000,
-    price: { prompt: 0.01 / 1000, completion: 0.03 / 1000 },
-  },
-  "gpt-4": {
-    maxToken: 8192,
-    price: { prompt: 0.03 / 1000, completion: 0.06 / 1000 },
-  },
-  "gpt-4-0613": {
-    maxToken: 8192,
-    price: { prompt: 0.03 / 1000, completion: 0.06 / 1000 },
-  },
-  "gpt-4-32k": {
-    maxToken: 8192,
-    price: { prompt: 0.06 / 1000, completion: 0.12 / 1000 },
-  },
-  "gpt-4-32k-0613": {
-    maxToken: 8192,
-    price: { prompt: 0.06 / 1000, completion: 0.12 / 1000 },
-  },
 };
 export const defaultModel = "gpt-3.5-turbo-0125";
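The models table keeps per-token prices split into prompt and completion rates (written as USD per 1K tokens divided by 1000, i.e. USD per token). A hedged sketch of how such an entry would typically be used to estimate request cost; calculateCost is an assumed helper, not code from this diff:

// Sketch only: estimate cost from a models-table entry like those above.
interface Model {
  maxToken: number;
  price: { prompt: number; completion: number }; // USD per token
}

function calculateCost(model: Model, promptTokens: number, completionTokens: number): number {
  return promptTokens * model.price.prompt + completionTokens * model.price.completion;
}

// With the gpt-4-0125-preview rates kept in the diff:
// 1000 prompt tokens + 500 completion tokens
//   = 1000 * (0.01 / 1000) + 500 * (0.03 / 1000)
//   = 0.01 + 0.015 = 0.025 USD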