chore(example/llm): Reintroduce models

See README
This commit is contained in:
Sridhar Ratnakumar 2024-06-14 12:42:52 -04:00 committed by Sridhar Ratnakumar
parent 2e60588ffd
commit 9b88034e0a
2 changed files with 9 additions and 0 deletions

View File

@ -4,3 +4,4 @@ While `services-flake` is generally used for running services in a *development*
`example/llm` runs two processes, ollama and open-webui, while storing the ollama data under `$HOME/.services-flake/ollama`. You can change this path in `flake.nix`.
By default, a single model (`llama2-uncensored`) is downloaded. You can modify this in `flake.nix` as well.

View File

@ -21,12 +21,20 @@
# Backend service to perform inference on LLM models
ollama."ollama1" = {
enable = true;
# The models are usually huge, downloading them in every project
# directory can lead to a lot of duplication. Change here to a
# directory where the Ollama models can be stored and shared across
# projects.
dataDir = "$HOME/.services-flake/ollama1";
# Define the models to download when our app starts
#
# You can also initialize this to an empty list, and download the
# models manually in the UI.
models = [ "llama2-uncensored" ];
};
# Get ChatGPT like UI, but open-source, with Open WebUI
open-webui."open-webui1" = {
enable = true;