diff --git a/frontend/src/components/LLMSelection/GroqAiOptions/index.jsx b/frontend/src/components/LLMSelection/GroqAiOptions/index.jsx
index c85f0f1e018009d95578b5a5995c9314cef8698e..ceee0d703509e7a3387965dbcb6caf87e4166d18 100644
--- a/frontend/src/components/LLMSelection/GroqAiOptions/index.jsx
+++ b/frontend/src/components/LLMSelection/GroqAiOptions/index.jsx
@@ -28,7 +28,13 @@ export default function GroqAiOptions({ settings }) {
           required={true}
           className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
         >
-          {["llama2-70b-4096", "mixtral-8x7b-32768"].map((model) => {
+          {[
+            "llama2-70b-4096",
+            "mixtral-8x7b-32768",
+            "llama3-8b-8192",
+            "llama3-70b-8192",
+            "gemma-7b-it",
+          ].map((model) => {
             return (
               <option key={model} value={model}>
                 {model}
diff --git a/frontend/src/hooks/useGetProvidersModels.js b/frontend/src/hooks/useGetProvidersModels.js
index b6cb403e16a0607d875d8e2a1296731e9980da34..aea66146aa80dbab6ba6dafe13a2fd7d5d2b9158 100644
--- a/frontend/src/hooks/useGetProvidersModels.js
+++ b/frontend/src/hooks/useGetProvidersModels.js
@@ -19,7 +19,13 @@ const PROVIDER_DEFAULT_MODELS = {
   localai: [],
   ollama: [],
   togetherai: [],
-  groq: ["llama2-70b-4096", "mixtral-8x7b-32768"],
+  groq: [
+    "llama2-70b-4096",
+    "mixtral-8x7b-32768",
+    "llama3-8b-8192",
+    "llama3-70b-8192",
+    "gemma-7b-it",
+  ],
   native: [],
 };
 
diff --git a/server/utils/AiProviders/groq/index.js b/server/utils/AiProviders/groq/index.js
index 1b15fe1fe8f5a786b273fb2e8193dfd33dda0e3b..c556d0357bd0ec0b18b214bfe37c5ee37e0de7f0 100644
--- a/server/utils/AiProviders/groq/index.js
+++ b/server/utils/AiProviders/groq/index.js
@@ -40,20 +40,31 @@ class GroqLLM {
   streamingEnabled() {
     return "streamChat" in this && "streamGetChatCompletion" in this;
   }
-
   promptWindowLimit() {
     switch (this.model) {
       case "llama2-70b-4096":
         return 4096;
       case "mixtral-8x7b-32768":
         return 32_768;
+      case "llama3-8b-8192":
+        return 8192;
+      case "llama3-70b-8192":
+        return 8192;
+      case "gemma-7b-it":
+        return 8192;
       default:
         return 4096;
     }
   }
 
   async isValidChatCompletionModel(modelName = "") {
-    const validModels = ["llama2-70b-4096", "mixtral-8x7b-32768"];
+    const validModels = [
+      "llama2-70b-4096",
+      "mixtral-8x7b-32768",
+      "llama3-8b-8192",
+      "llama3-70b-8192",
+      "gemma-7b-it",
+    ];
     const isPreset = validModels.some((model) => modelName === model);
     if (isPreset) return true;
 