diff --git a/server/utils/AiProviders/openAi/index.js b/server/utils/AiProviders/openAi/index.js
index 4f6bc2219b29bf16c487758a09e6caa242329844..2829a53747f9f024e3331bcee3f8c85a3669ddd9 100644
--- a/server/utils/AiProviders/openAi/index.js
+++ b/server/utils/AiProviders/openAi/index.js
@@ -62,7 +62,9 @@ class OpenAiLLM {
   // we don't want to hit the OpenAI api every chat because it will get spammed
   // and introduce latency for no reason.
   async isValidChatCompletionModel(modelName = "") {
-    const isPreset = modelName.toLowerCase().includes("gpt");
+    const isPreset =
+      modelName.toLowerCase().includes("gpt") ||
+      modelName.toLowerCase().includes("o1");
     if (isPreset) return true;
 
     const model = await this.openai.models
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index 35ab5570d05eecca84fe68fc0640f73949f582a0..1361c527196b99a2ee95612461eab4a064717103 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -141,9 +141,17 @@ async function openAiModels(apiKey = null) {
   });
 
   const gpts = allModels
-    .filter((model) => model.id.startsWith("gpt") || model.id.startsWith("o1"))
     .filter(
-      (model) => !model.id.includes("vision") && !model.id.includes("instruct")
+      (model) =>
+        (model.id.includes("gpt") && !model.id.startsWith("ft:")) ||
+        model.id.includes("o1")
+    )
+    .filter(
+      (model) =>
+        !model.id.includes("vision") &&
+        !model.id.includes("instruct") &&
+        !model.id.includes("audio") &&
+        !model.id.includes("realtime")
     )
     .map((model) => {
       return {
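
For reviewers, here is a minimal sketch of the combined filter logic from the customModels.js hunk as a standalone predicate; the `isChatModel` helper name and the sample model IDs are illustrative only and are not part of the patch:

```js
// Illustrative standalone version of the two .filter() passes added above.
function isChatModel(id) {
  // First pass: GPT-family (excluding fine-tunes) or o1-family models.
  const matchesFamily =
    (id.includes("gpt") && !id.startsWith("ft:")) || id.includes("o1");
  // Second pass: drop variants that are not plain chat-completion models.
  const isExcluded =
    id.includes("vision") ||
    id.includes("instruct") ||
    id.includes("audio") ||
    id.includes("realtime");
  return matchesFamily && !isExcluded;
}

// Sample IDs (illustrative):
console.log(isChatModel("gpt-4o")); // true
console.log(isChatModel("o1-preview")); // true  - o1 models now included
console.log(isChatModel("ft:gpt-4o:org::abc123")); // false - fine-tunes excluded
console.log(isChatModel("gpt-4o-audio-preview")); // false - audio excluded
console.log(isChatModel("gpt-4o-realtime-preview")); // false - realtime excluded
```

Note that the first filter also switches from `startsWith("gpt")` to `includes("gpt")`, which is why the explicit `ft:` exclusion becomes necessary: fine-tuned model IDs such as `ft:gpt-4o:...` contain "gpt" but do not start with it, so the old `startsWith` check excluded them implicitly.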