mirror of
https://github.com/gbtb/nix-stable-diffusion.git
synced 2024-09-11 11:16:15 +03:00
added InvokeAI shell with nvidia support,
updated readme, removed mkl overlay, changed output devshells names
This commit is contained in:
parent
37f5125f0f
commit
a018316066
@ -2,14 +2,17 @@
|
||||
Flake for running SD on NixOS
|
||||
|
||||
## What's done
|
||||
* Nix devShell capable of running InvokeAI's flavor of SD without need to fallback for pip or conda (targets AMD ROCM)
|
||||
* Nix devShell capable of running InvokeAI's flavor of SD without the need to fall back to pip or conda (including AMD ROCM support)
|
||||
* ...???
|
||||
* PROFIT
|
||||
|
||||
# How to use it?
|
||||
1. Clone repo
|
||||
1. Clone submodule with InvokeAI
|
||||
1. Run `nix develop`, wait for shell to build
|
||||
1. Run `nix develop .#invokeai.{default,nvidia,amd}`, wait for shell to build
|
||||
1. `.#invokeai.default` builds a shell which overrides the bare minimum required for SD to run
|
||||
1. `.#invokeai.amd` builds a shell which overrides the torch packages with ROCM-enabled bin versions
|
||||
1. `.#invokeai.nvidia` builds a shell with an overlay explicitly setting `cudaSupport = true` for torch
|
||||
1. Inside InvokeAI's directory, run `python scripts/preload_models.py` to preload models
|
||||
1. Place SD weights into `models/ldm/stable-diffusion-v1/model.ckpt`
|
||||
1. Run CLI with `python scripts/invoke.py` or GUI with `python scripts/invoke.py --web`
|
||||
@ -17,7 +20,7 @@ Flake for running SD on NixOS
|
||||
|
||||
## What still needs to be done
|
||||
|
||||
- [ ] devShell with CUDA support (should be trivial, but requires volunteer with NVidia GPU)
|
||||
- [x] devShell with CUDA support (should be trivial, but requires volunteer with NVidia GPU)
|
||||
- [ ] Missing packages definitions should be submitted to Nixpkgs
|
||||
- [ ] Investigate ROCM device warning on startup
|
||||
- [ ] Apply patches so that all downloaded models would go into one specific folder
|
||||
|
83
flake.nix
83
flake.nix
@ -113,6 +113,14 @@
|
||||
});
|
||||
torch = torch-bin;
|
||||
torchvision = torchvision-bin;
|
||||
#overriding because of https://github.com/NixOS/nixpkgs/issues/196653
|
||||
opencv4 = pythonPackages.opencv4.override { openblas = nixpkgs.blas; };
|
||||
};
|
||||
overlay_nvidia = nixpkgs: pythonPackages:
|
||||
{
|
||||
torch = pythonPackages.torch.override {
|
||||
cudaSupport = true;
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
@ -122,7 +130,19 @@
|
||||
let
|
||||
nixpkgs_ = import inputs.nixpkgs {
|
||||
inherit system;
|
||||
config.allowUnfree = true; #both CUDA and MKL are unfree
|
||||
overlays = [
|
||||
(final: prev: {
|
||||
python3 = prev.python3.override {
|
||||
packageOverrides =
|
||||
python-self: python-super:
|
||||
(overlay_default prev python-super) //
|
||||
(overlay_pynixify python-self);
|
||||
};
|
||||
})
|
||||
];
|
||||
};
|
||||
nixpkgs_amd = import inputs.nixpkgs {
|
||||
inherit system;
|
||||
overlays = [
|
||||
(final: prev: {
|
||||
python3 = prev.python3.override {
|
||||
@ -131,31 +151,54 @@
|
||||
(overlay_default prev python-super) //
|
||||
(overlay_amd prev python-super) //
|
||||
(overlay_pynixify python-self);
|
||||
#((import ./pynixify/overlay.nix) python-self python-super);
|
||||
};
|
||||
})
|
||||
];
|
||||
};
|
||||
nixpkgs_nvidia = import inputs.nixpkgs {
|
||||
inherit system;
|
||||
config.allowUnfree = true; #CUDA is unfree
|
||||
overlays = [
|
||||
(final: prev: {
|
||||
python3 = prev.python3.override {
|
||||
packageOverrides =
|
||||
python-self: python-super:
|
||||
(overlay_default prev python-super) //
|
||||
(overlay_nvidia prev python-super) //
|
||||
(overlay_pynixify python-self);
|
||||
};
|
||||
})
|
||||
];
|
||||
};
|
||||
in
|
||||
rec {
|
||||
invokeai-amd = nixpkgs_.mkShell
|
||||
(let
|
||||
lapack = nixpkgs_.lapack.override { lapackProvider = nixpkgs_.mkl; };
|
||||
blas = nixpkgs_.lapack.override { lapackProvider = nixpkgs_.mkl; };
|
||||
in
|
||||
{
|
||||
name = "invokeai-amd";
|
||||
propagatedBuildInputs = requirements nixpkgs_;
|
||||
shellHook = ''
|
||||
#on my machine SD segfaults somewhere inside scipy with openblas, so I had to use another blas impl
|
||||
#build of scipy with non-default blas is broken, therefore overriding lib in runtime
|
||||
|
||||
export NIXPKGS_ALLOW_UNFREE=1
|
||||
export LD_LIBRARY_PATH=${lapack}/lib:${blas}/lib
|
||||
cd InvokeAI
|
||||
'';
|
||||
});
|
||||
default = invokeai-amd;
|
||||
invokeai = {
|
||||
default = nixpkgs_amd.mkShell
|
||||
({
|
||||
name = "invokeai";
|
||||
propagatedBuildInputs = requirements nixpkgs_;
|
||||
shellHook = ''
|
||||
cd InvokeAI
|
||||
'';
|
||||
});
|
||||
amd = nixpkgs_amd.mkShell
|
||||
({
|
||||
name = "invokeai.amd";
|
||||
propagatedBuildInputs = requirements nixpkgs_amd;
|
||||
shellHook = ''
|
||||
cd InvokeAI
|
||||
'';
|
||||
});
|
||||
nvidia = nixpkgs_nvidia.mkShell
|
||||
({
|
||||
name = "invokeai.nvidia";
|
||||
propagatedBuildInputs = requirements nixpkgs_nvidia;
|
||||
shellHook = ''
|
||||
cd InvokeAI
|
||||
'';
|
||||
});
|
||||
};
|
||||
default = invokeai.amd;
|
||||
});
|
||||
};
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user