Mirror of https://github.com/nomic-ai/gpt4all.git (synced 2024-11-20 11:41:29 +03:00)
[DATALAD RUNCMD] run codespell throughout
=== Do not change lines below ===
{
 "chain": [],
 "cmd": "codespell -w",
 "exit": 0,
 "extra_inputs": [],
 "inputs": [],
 "outputs": [],
 "pwd": "."
}
^^^ Do not change lines above ^^^
parent e4bc9c0c3b
commit c942780f5a
@@ -9,7 +9,7 @@ Please note we have a code of conduct, please follow it in all your interactions
 
 1. Ensure any install or build dependencies are removed before the end of the layer when doing a build.
 2. Make sure Pull Request is tagged with appropriate project identifiers and has a clear description of contribution.
-3. Any new or updated code must have documentation and preferrably tests included with Pull Request.
+3. Any new or updated code must have documentation and preferably tests included with Pull Request.
 4. Significant feature or code changes should provide a short video or screenshot demo.
 4. Fill out relevant parts of Pull Request template.
 4. Pull requests must have sign-off from one other developer. Reach out to a repository owner once your
@@ -181,7 +181,7 @@ class GPT4All():
 with value of "system", "assistant", or "user" and a "content" key with a
 string value. Messages are organized such that "system" messages are at top of prompt,
 and "user" and "assistant" messages are displayed in order. Assistant messages get formatted as
-"Reponse: {content}".
+"Response: {content}".
 default_prompt_header: If True (default), add default prompt header after any system role messages and
 before user/assistant role messages.
 default_prompt_footer: If True (default), add default footer at end of prompt.
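For context, here is a minimal sketch of the message format this docstring describes. It assumes the surrounding method is the bindings' chat_completion(); the model name and prompts are illustrative placeholders, not code from this commit.

```python
from gpt4all import GPT4All

# Placeholder model name; substitute whatever model the bindings can load locally.
model = GPT4All("ggml-gpt4all-j-v1.3-groovy")

messages = [
    # "system" messages are placed at the top of the prompt
    {"role": "system", "content": "You are a concise assistant."},
    # "user" and "assistant" messages follow in order;
    # assistant content is rendered as "Response: {content}"
    {"role": "user", "content": "Name one planet."},
    {"role": "assistant", "content": "Mars."},
    {"role": "user", "content": "Name another."},
]

# default_prompt_header/footer mirror the parameters documented above.
response = model.chat_completion(
    messages,
    default_prompt_header=True,
    default_prompt_footer=True,
)
```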
@@ -4,7 +4,7 @@ import sys
 from gpt4all import pyllmodel
 
 # TODO: Integration test for loadmodel and prompt.
-# # Right now, too slow b/c it requries file download.
+# # Right now, too slow b/c it requires file download.
 
 def test_create_gptj():
     gptj = pyllmodel.GPTJModel()
@@ -91,7 +91,7 @@ def train(accelerator, config):
     total_num_steps += int(total_num_steps * lr_ratio) + config["warmup_steps"]
     accelerator.print(f"Total training steps: {total_num_steps}")
 
-    # Creates Dummy Scheduler if `scheduler` was spcified in the config file else creates `args.lr_scheduler_type` Scheduler
+    # Creates Dummy Scheduler if `scheduler` was specified in the config file else creates `args.lr_scheduler_type` Scheduler
     if (
         accelerator.state.deepspeed_plugin is None
         or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
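The hunk above shows only the condition that decides how the learning-rate scheduler is created. Below is a hedged sketch of the usual pattern with Hugging Face accelerate and DeepSpeed; the branch bodies, the optimizer variable, and the config keys are assumptions for illustration, not the repository's actual train.py.

```python
from accelerate.utils import DummyScheduler
from transformers import get_scheduler

# `accelerator`, `config`, `optimizer`, and `total_num_steps` are assumed to come
# from earlier in train(); they are not defined in this sketch.
if (
    accelerator.state.deepspeed_plugin is None
    or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
    # No scheduler entry in the DeepSpeed config: create one explicitly.
    scheduler = get_scheduler(
        name=config["lr_scheduler_type"],  # assumed config key
        optimizer=optimizer,
        num_warmup_steps=config["warmup_steps"],
        num_training_steps=total_num_steps,
    )
else:
    # DeepSpeed builds the real scheduler from its own config; pass a placeholder.
    scheduler = DummyScheduler(
        optimizer,
        total_num_steps=total_num_steps,
        warmup_num_steps=config["warmup_steps"],
    )
```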