diff --git a/gpt4all-chat/metadata/models3.json b/gpt4all-chat/metadata/models3.json
index 4cc8fddf..112ea79d 100644
--- a/gpt4all-chat/metadata/models3.json
+++ b/gpt4all-chat/metadata/models3.json
@@ -1,17 +1,17 @@
[
{
"order": "a",
- "md5sum": "3a265fbb343693d283f8a4ec5e7f1529",
- "name": "Llama 3.1 8B Instruct",
- "filename": "Meta-Llama-3.1-8B-Instruct.Q4_0.gguf",
- "filesize": "4661211808",
- "requires": "3.1.0",
+ "md5sum": "8a9c75bcd8a66b7693f158ec96924eeb",
+ "name": "Llama 3.1 8B Instruct 128k",
+ "filename": "Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf",
+ "filesize": "4661212096",
+ "requires": "3.1.1",
"ramrequired": "8",
"parameters": "8 billion",
"quant": "q4_0",
"type": "LLaMA3",
- "description": "
",
- "url": "https://huggingface.co/3Simplex/Meta-Llama-3.1-8B-Instruct-gguf/resolve/main/Meta-Llama-3.1-8B-Instruct-Q4_0.gguf",
+ "description": "- Fast responses
- Chat based model
- Large context size of 128k
- Accepts agentic system prompts in Llama 3.1 format
- Trained by Meta
- License: Meta Llama 3.1 Community License
",
+ "url": "https://huggingface.co/GPT4All-Community/Meta-Llama-3.1-8B-Instruct-128k/blob/main/Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf",
"promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2",
"systemPrompt": "<|start_header_id|>system<|end_header_id|>\nCutting Knowledge Date: December 2023\n\nYou are a helpful assistant.<|eot_id|>"
},
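
A minimal sketch (not part of this change) of how a reviewer might check the new md5sum and filesize fields against a locally downloaded GGUF; the local file path and working directory are assumptions for illustration only.

# verify_entry.py -- sanity-check one models3.json entry against a local GGUF
import hashlib
import json
import os

MODELS_JSON = "gpt4all-chat/metadata/models3.json"        # path within this repo
MODEL_FILE = "Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf"  # assumed to be downloaded locally

with open(MODELS_JSON, "r", encoding="utf-8") as f:
    entries = json.load(f)  # models3.json is a JSON array of model entries

# Find the entry whose "filename" matches the local file.
entry = next(e for e in entries if e.get("filename") == MODEL_FILE)

# Cheap check first: the on-disk size must equal the "filesize" field.
assert os.path.getsize(MODEL_FILE) == int(entry["filesize"]), "filesize mismatch"

# Then compute the MD5 checksum in 1 MiB chunks and compare with "md5sum".
md5 = hashlib.md5()
with open(MODEL_FILE, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        md5.update(chunk)
assert md5.hexdigest() == entry["md5sum"], "md5sum mismatch"

print("entry matches local file:", entry["name"])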