diff --git a/gpt4all-chat/metadata/models3.json b/gpt4all-chat/metadata/models3.json
new file mode 100644
index 00000000..5e33ca0f
--- /dev/null
+++ b/gpt4all-chat/metadata/models3.json
@@ -0,0 +1,257 @@
+[
+    {
+        "order": "a",
+        "md5sum": "6d1ca6e9533d177361fe2612a2c87474",
+        "name": "Gemma Instruct",
+        "filename": "gemma-7b-it.Q4_0.gguf",
+        "filesize": "4809316512",
+        "requires": "2.7.1",
+        "ramrequired": "8",
+        "parameters": "7 billion",
+        "quant": "q4_0",
+        "type": "Gemma",
+        "description": "<strong>A state-of-the-art open model from Google</strong><br>",
+        "url": "https://gpt4all.io/models/gguf/gemma-7b-it.Q4_0.gguf",
+        "promptTemplate": "<start_of_turn>user\n%1<end_of_turn>\n<start_of_turn>model\n%2<end_of_turn>\n",
+        "systemPrompt": ""
+    },
+    {
+        "order": "b",
+        "md5sum": "f692417a22405d80573ac10cb0cd6c6a",
+        "name": "Mistral OpenOrca",
+        "filename": "mistral-7b-openorca.Q4_0.gguf2.gguf",
+        "filesize": "4108928128",
+        "requires": "2.5.0",
+        "ramrequired": "8",
+        "parameters": "7 billion",
+        "quant": "q4_0",
+        "type": "Mistral",
+        "description": "<strong>Best overall fast chat model</strong><br><ul><li>Fast responses</li><li>Chat based model</li><li>Trained by Mistral AI</li><li>Finetuned on OpenOrca dataset curated via Nomic Atlas</li><li>Licensed for commercial use</li></ul>",
+        "url": "https://gpt4all.io/models/gguf/mistral-7b-openorca.Q4_0.gguf",
+        "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n",
+        "systemPrompt": "<|im_start|>system\nYou are MistralOrca, a large language model trained by Alignment Lab AI. For multi-step problems, write out your reasoning for each step.\n<|im_end|>"
+    },
+    {
+        "order": "c",
+        "md5sum": "c4c78adf744d6a20f05c8751e3961b84",
+        "name": "GPT4All Falcon",
+        "filename": "gpt4all-falcon-newbpe-q4_0.gguf",
+        "filesize": "4210994112",
+        "requires": "2.6.0",
+        "ramrequired": "8",
+        "parameters": "7 billion",
+        "quant": "q4_0",
+        "type": "Falcon",
+        "systemPrompt": " ",
+        "description": "<strong>Very fast model with good quality</strong><br><ul><li>Fastest responses</li><li>Instruction based</li><li>Trained by TII</li><li>Finetuned by Nomic AI</li><li>Licensed for commercial use</li></ul>",
+        "url": "https://gpt4all.io/models/gguf/gpt4all-falcon-newbpe-q4_0.gguf",
+        "promptTemplate": "### Instruction:\n%1\n### Response:\n"
+    },
+    {
+        "order": "d",
+        "md5sum": "97463be739b50525df56d33b26b00852",
+        "name": "Mistral Instruct",
+        "filename": "mistral-7b-instruct-v0.1.Q4_0.gguf",
+        "filesize": "4108916384",
+        "requires": "2.5.0",
+        "ramrequired": "8",
+        "parameters": "7 billion",
+        "quant": "q4_0",
+        "type": "Mistral",
+        "systemPrompt": " ",
+        "description": "<strong>Best overall fast instruction following model</strong><br><ul><li>Fast responses</li><li>Trained by Mistral AI</li><li>Uncensored</li><li>Licensed for commercial use</li></ul>",
+        "url": "https://gpt4all.io/models/gguf/mistral-7b-instruct-v0.1.Q4_0.gguf",
+        "promptTemplate": "[INST] %1 [/INST]"
+    },
+    {
+        "order": "e",
+        "md5sum": "00c8593ba57f5240f59662367b3ed4a5",
+        "name": "Orca 2 (Medium)",
+        "filename": "orca-2-7b.Q4_0.gguf",
+        "filesize": "3825824192",
+        "requires": "2.5.2",
+        "ramrequired": "8",
+        "parameters": "7 billion",
+        "quant": "q4_0",
+        "type": "LLaMA2",
+        "systemPrompt": " ",
+        "description": "<ul><li>Instruction based</li><li>Trained by Microsoft</li><li>Cannot be used commercially</li></ul>",
+        "url": "https://gpt4all.io/models/gguf/orca-2-7b.Q4_0.gguf"
+    },
+    {
+        "order": "f",
+        "md5sum": "3c0d63c4689b9af7baa82469a6f51a19",
+        "name": "Orca 2 (Full)",
+        "filename": "orca-2-13b.Q4_0.gguf",
+        "filesize": "7365856064",
+        "requires": "2.5.2",
+        "ramrequired": "16",
+        "parameters": "13 billion",
+        "quant": "q4_0",
+        "type": "LLaMA2",
+        "systemPrompt": " ",
+        "description": "<ul><li>Instruction based</li><li>Trained by Microsoft</li><li>Cannot be used commercially</li></ul>",
+        "url": "https://gpt4all.io/models/gguf/orca-2-13b.Q4_0.gguf"
+    },
+    {
+        "order": "g",
+        "md5sum": "5aff90007499bce5c64b1c0760c0b186",
+        "name": "Wizard v1.2",
+        "filename": "wizardlm-13b-v1.2.Q4_0.gguf",
+        "filesize": "7365834624",
+        "requires": "2.5.0",
+        "ramrequired": "16",
+        "parameters": "13 billion",
+        "quant": "q4_0",
+        "type": "LLaMA2",
+        "systemPrompt": " ",
+        "description": "<strong>Best overall larger model</strong><br><ul><li>Instruction based</li><li>Gives very long responses</li><li>Finetuned with only 1k of high-quality data</li><li>Trained by Microsoft and Peking University</li><li>Cannot be used commercially</li></ul>",
+        "url": "https://gpt4all.io/models/gguf/wizardlm-13b-v1.2.Q4_0.gguf"
+    },
+    {
+        "order": "h",
+        "md5sum": "3d12810391d04d1153b692626c0c6e16",
+        "name": "Hermes",
+        "filename": "nous-hermes-llama2-13b.Q4_0.gguf",
+        "filesize": "7366062080",
+        "requires": "2.5.0",
+        "ramrequired": "16",
+        "parameters": "13 billion",
+        "quant": "q4_0",
+        "type": "LLaMA2",
+        "systemPrompt": " ",
+        "description": "<strong>Extremely good model</strong><br><ul><li>Instruction based</li><li>Gives long responses</li><li>Curated with 300,000 uncensored instructions</li><li>Trained by Nous Research</li><li>Cannot be used commercially</li></ul>",
+        "url": "https://gpt4all.io/models/gguf/nous-hermes-llama2-13b.Q4_0.gguf",
+        "promptTemplate": "### Instruction:\n%1\n### Response:\n"
+    },
+    {
+        "order": "i",
+        "md5sum": "40388eb2f8d16bb5d08c96fdfaac6b2c",
+        "name": "Snoozy",
+        "filename": "gpt4all-13b-snoozy-q4_0.gguf",
+        "filesize": "7365834624",
+        "requires": "2.5.0",
+        "ramrequired": "16",
+        "parameters": "13 billion",
+        "quant": "q4_0",
+        "type": "LLaMA",
+        "systemPrompt": " ",
+        "description": "<strong>Very good overall model</strong><br><ul><li>Instruction based</li><li>Based on the same dataset as Groovy</li><li>Slower than Groovy, with higher quality responses</li><li>Trained by Nomic AI</li><li>Cannot be used commercially</li></ul>",
+        "url": "https://gpt4all.io/models/gguf/gpt4all-13b-snoozy-q4_0.gguf"
+    },
+    {
+        "order": "j",
+        "md5sum": "15dcb4d7ea6de322756449c11a0b7545",
+        "name": "MPT Chat",
+        "filename": "mpt-7b-chat-newbpe-q4_0.gguf",
+        "filesize": "3912373472",
+        "requires": "2.6.0",
+        "ramrequired": "8",
+        "parameters": "7 billion",
+        "quant": "q4_0",
+        "type": "MPT",
+        "description": "<strong>Good model with novel architecture</strong><br><ul><li>Fast responses</li><li>Chat based</li><li>Trained by Mosaic ML</li><li>Cannot be used commercially</li></ul>",
+        "url": "https://gpt4all.io/models/gguf/mpt-7b-chat-newbpe-q4_0.gguf",
+        "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n",
+        "systemPrompt": "<|im_start|>system\n- You are a helpful assistant chatbot trained by MosaicML.\n- You answer questions.\n- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.\n- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.<|im_end|>"
+    },
+    {
+        "order": "k",
+        "md5sum": "0e769317b90ac30d6e09486d61fefa26",
+        "name": "Mini Orca (Small)",
+        "filename": "orca-mini-3b-gguf2-q4_0.gguf",
+        "filesize": "1979946720",
+        "requires": "2.5.0",
+        "ramrequired": "4",
+        "parameters": "3 billion",
+        "quant": "q4_0",
+        "type": "OpenLLaMa",
+        "description": "<strong>Small version of new model with novel dataset</strong><br><ul><li>Instruction based</li><li>Explain tuned datasets</li><li>Orca Research Paper dataset construction approaches</li><li>Cannot be used commercially</li></ul>",
+        "url": "https://gpt4all.io/models/gguf/orca-mini-3b-gguf2-q4_0.gguf",
+        "promptTemplate": "### User:\n%1\n### Response:\n",
+        "systemPrompt": "### System:\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\n\n"
+    },
+    {
+        "order": "l",
+        "md5sum": "c232f17e09bca4b7ee0b5b1f4107c01e",
+        "disableGUI": "true",
+        "name": "Replit",
+        "filename": "replit-code-v1_5-3b-newbpe-q4_0.gguf",
+        "filesize": "1953055104",
+        "requires": "2.6.0",
+        "ramrequired": "4",
+        "parameters": "3 billion",
+        "quant": "q4_0",
+        "type": "Replit",
+        "systemPrompt": " ",
+        "promptTemplate": "%1",
+        "description": "<strong>Trained on subset of the Stack</strong><br><ul><li>Code completion based</li><li>Licensed for commercial use</li><li>WARNING: Not available for chat GUI</li></ul>",
+        "url": "https://gpt4all.io/models/gguf/replit-code-v1_5-3b-newbpe-q4_0.gguf"
+    },
+    {
+        "order": "m",
+        "md5sum": "70841751ccd95526d3dcfa829e11cd4c",
+        "disableGUI": "true",
+        "name": "Starcoder",
+        "filename": "starcoder-newbpe-q4_0.gguf",
+        "filesize": "8987411904",
+        "requires": "2.6.0",
+        "ramrequired": "4",
+        "parameters": "7 billion",
+        "quant": "q4_0",
+        "type": "Starcoder",
+        "systemPrompt": " ",
+        "promptTemplate": "%1",
+        "description": "<strong>Trained on subset of the Stack</strong><br><ul><li>Code completion based</li><li>WARNING: Not available for chat GUI</li></ul>",
+        "url": "https://gpt4all.io/models/gguf/starcoder-newbpe-q4_0.gguf"
+    },
+    {
+        "order": "n",
+        "md5sum": "e973dd26f0ffa6e46783feaea8f08c83",
+        "disableGUI": "true",
+        "name": "Rift coder",
+        "filename": "rift-coder-v0-7b-q4_0.gguf",
+        "filesize": "3825903776",
+        "requires": "2.5.0",
+        "ramrequired": "8",
+        "parameters": "7 billion",
+        "quant": "q4_0",
+        "type": "LLaMA",
+        "systemPrompt": " ",
+        "promptTemplate": "%1",
+        "description": "<strong>Trained on collection of Python and TypeScript</strong><br><ul><li>Code completion based</li><li>WARNING: Not available for chat GUI</li></ul>",
+        "url": "https://gpt4all.io/models/gguf/rift-coder-v0-7b-q4_0.gguf"
+    },
+    {
+        "order": "o",
+        "md5sum": "e479e6f38b59afc51a470d1953a6bfc7",
+        "disableGUI": "true",
+        "name": "SBert",
+        "filename": "all-MiniLM-L6-v2-f16.gguf",
+        "filesize": "45887744",
+        "requires": "2.5.0",
+        "ramrequired": "1",
+        "parameters": "40 million",
+        "quant": "f16",
+        "type": "Bert",
+        "systemPrompt": " ",
+        "description": "<strong>LocalDocs text embeddings model</strong><br><ul><li>For use with LocalDocs feature</li><li>Used for retrieval augmented generation (RAG)</li></ul>",
+        "url": "https://gpt4all.io/models/gguf/all-MiniLM-L6-v2-f16.gguf"
+    },
+    {
+        "order": "p",
+        "md5sum": "919de4dd6f25351bcb0223790db1932d",
+        "name": "EM German Mistral",
+        "filename": "em_german_mistral_v01.Q4_0.gguf",
+        "filesize": "4108916352",
+        "requires": "2.5.0",
+        "ramrequired": "8",
+        "parameters": "7 billion",
+        "quant": "q4_0",
+        "type": "Mistral",
+        "description": "<strong>Mistral-based model for German-language applications</strong><br><ul><li>Fast responses</li><li>Chat based model</li><li>Trained by ellamind</li><li>Finetuned on German instruction and chat data</li><li>Licensed for commercial use</li></ul>",
+        "url": "https://huggingface.co/TheBloke/em_german_mistral_v01-GGUF/resolve/main/em_german_mistral_v01.Q4_0.gguf",
+        "promptTemplate": "USER: %1 ASSISTANT: ",
+        "systemPrompt": "Du bist ein hilfreicher Assistent. "
+    }
+]