diff --git a/gpt4all-chat/CHANGELOG.md b/gpt4all-chat/CHANGELOG.md index a81124927e9d..bbfb0b0e6da1 100644 --- a/gpt4all-chat/CHANGELOG.md +++ b/gpt4all-chat/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). ## [Unreleased] ### Added +- Add Gemma-2-9b-it to models3.json (by [@ThiloteE](https://github.com/ThiloteE) in [#2803](https://github.com/nomic-ai/gpt4all/pull/2803)) - Use greedy sampling when temperature is set to zero ([#2854](https://github.com/nomic-ai/gpt4all/pull/2854)) - Use configured system prompt in server mode and ignore system messages ([#2921](https://github.com/nomic-ai/gpt4all/pull/2921), [#2924](https://github.com/nomic-ai/gpt4all/pull/2924)) - Add more system information to anonymous usage stats ([#2939](https://github.com/nomic-ai/gpt4all/pull/2939)) diff --git a/gpt4all-chat/metadata/models3.json b/gpt4all-chat/metadata/models3.json index bce7a7324a36..624e16ff562d 100644 --- a/gpt4all-chat/metadata/models3.json +++ b/gpt4all-chat/metadata/models3.json @@ -31,6 +31,22 @@ "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n", "systemPrompt": "" }, + { + "order": "bb", + "md5sum": "9659a4470dc7d5d75be39daa913ecd0d", + "name": "Gemma 2 9b it", + "filename": "gemma-2-9b-it-Q4_0.gguf", + "filesize": "5443142592", + "requires": "3.1", + "ramrequired": "16", + "parameters": "9 billion", + "quant": "q4_0", + "type": "gemma2", + "description": "", + "url": "https://huggingface.co/GPT4All-Community/gemma-2-9b-it-GGUF/resolve/main/gemma-2-9b-it-Q4_0.gguf", + "promptTemplate": "<start_of_turn>user\n%1<end_of_turn>\n<start_of_turn>model\n%2<end_of_turn>\n", + "systemPrompt": "" + }, { "order": "c", "md5sum": "97463be739b50525df56d33b26b00852",