diff --git a/README.md b/README.md index 7732114..6b40ead 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,9 @@ When the client sends a request with `gpt_refine=True`, this feature will be act - For `/v1/audio/transcriptions`, submit using `curl -F file=audio.mp4 -F gpt_refine=True`. - For `/v1/konele/ws` and `/v1/konele/post`, use the URL format `/v1/konele/ws/gpt_refine`. -The default model is `gpt-4o-mini`. You can easily edit the code to change the or LLM's prompt to better fit your workflow. It's just a few lines of code. Give it a try, it's very simple! +The default model is `gpt-4o-mini`; it can be changed via the `OPENAI_LLM_MODEL` environment variable. + +You can easily edit the code to change the LLM's prompt to better fit your workflow. It's just a few lines of code. Give it a try, it's very simple! ## Usage diff --git a/whisper_fastapi.py b/whisper_fastapi.py index 5ee6cc2..5991f3a 100644 --- a/whisper_fastapi.py +++ b/whisper_fastapi.py @@ -81,7 +81,7 @@ async def gpt_refine_text( os.environ.get("OPENAI_BASE_URL", "https://api.openai.com/v1") + "/chat/completions", json={ - "model": "gpt-4o-mini", + "model": os.environ.get("OPENAI_LLM_MODEL", "gpt-4o-mini"), "temperature": 0.1, "stream": False, "messages": [