Skip to content
Snippets Groups Projects
Unverified Commit 897e168f authored by Sean Hatfield's avatar Sean Hatfield Committed by GitHub
Browse files

[FEAT] Add support for more groq models (Llama 3 and Gemma) (#1143)

add support for more groq models
parent 7e3b8cd4
No related branches found
No related tags found
No related merge requests found
......@@ -28,7 +28,13 @@ export default function GroqAiOptions({ settings }) {
required={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{["llama2-70b-4096", "mixtral-8x7b-32768"].map((model) => {
{[
"llama2-70b-4096",
"mixtral-8x7b-32768",
"llama3-8b-8192",
"llama3-70b-8192",
"gemma-7b-it",
].map((model) => {
return (
<option key={model} value={model}>
{model}
......
......@@ -19,7 +19,13 @@ const PROVIDER_DEFAULT_MODELS = {
localai: [],
ollama: [],
togetherai: [],
groq: ["llama2-70b-4096", "mixtral-8x7b-32768"],
groq: [
"llama2-70b-4096",
"mixtral-8x7b-32768",
"llama3-8b-8192",
"llama3-70b-8192",
"gemma-7b-it",
],
native: [],
};
......
......@@ -40,20 +40,31 @@ class GroqLLM {
streamingEnabled() {
return "streamChat" in this && "streamGetChatCompletion" in this;
}
promptWindowLimit() {
switch (this.model) {
case "llama2-70b-4096":
return 4096;
case "mixtral-8x7b-32768":
return 32_768;
case "llama3-8b-8192":
return 8192;
case "llama3-70b-8192":
return 8192;
case "gemma-7b-it":
return 8192;
default:
return 4096;
}
}
async isValidChatCompletionModel(modelName = "") {
const validModels = ["llama2-70b-4096", "mixtral-8x7b-32768"];
const validModels = [
"llama2-70b-4096",
"mixtral-8x7b-32768",
"llama3-8b-8192",
"llama3-70b-8192",
"gemma-7b-it",
];
const isPreset = validModels.some((model) => modelName === model);
if (isPreset) return true;
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment