From 8460e70c46fc8ea688f59fb64acc9e33250551d1 Mon Sep 17 00:00:00 2001 From: oddomatik Date: Fri, 25 Aug 2023 15:02:54 -0700 Subject: [PATCH 1/3] Invoke 3.0 Dockerfile InvokeAI 3.0 has a completely different structure and tries to auto-download everything. There may be better solutions for this such as skipping model downloads or similar, but after initial run the container should start quickly. Most of the symlinks should capture the models roughly into the existing structure used in stable-diffusion-webui-docker repo but further testing should be done. Docker file is based on upstream invokeai's Dockerfile --- services/invoke/Dockerfile | 172 +++++++++++++++++++++++++++++-------- 1 file changed, 136 insertions(+), 36 deletions(-) diff --git a/services/invoke/Dockerfile b/services/invoke/Dockerfile index 209c368..db64004 100644 --- a/services/invoke/Dockerfile +++ b/services/invoke/Dockerfile @@ -1,53 +1,153 @@ -FROM alpine:3.17 as xformers -RUN apk add --no-cache aria2 -RUN aria2c -x 5 --dir / --out wheel.whl 'https://github.com/AbdBarho/stable-diffusion-webui-docker/releases/download/6.0.0/xformers-0.0.21.dev544-cp310-cp310-manylinux2014_x86_64-pytorch201.whl' +# syntax=docker/dockerfile:1.4 +## Builder stage -FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime +FROM library/ubuntu:22.04 AS builder -ENV DEBIAN_FRONTEND=noninteractive PIP_EXISTS_ACTION=w PIP_PREFER_BINARY=1 +ARG DEBIAN_FRONTEND=noninteractive +RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt update && apt-get install -y \ + git \ + python3.10-venv \ + python3-pip \ + build-essential -# patch match: -# https://github.com/invoke-ai/InvokeAI/blob/main/docs/installation/INSTALL_PATCHMATCH.md -RUN --mount=type=cache,target=/var/cache/apt \ - apt-get update && \ - apt-get install make g++ git 
libopencv-dev -y && \ - apt-get clean && \ - cd /usr/lib/x86_64-linux-gnu/pkgconfig/ && \ - ln -sf opencv4.pc opencv.pc +#ENV INVOKE_AI_ROOT=/InvokeAI +ENV INVOKEAI_SRC=/opt/invokeai +ENV VIRTUAL_ENV=/opt/venv/invokeai +ENV PATH="$VIRTUAL_ENV/bin:$PATH" +ARG TORCH_VERSION=2.0.1 +ARG TORCHVISION_VERSION=0.15.2 +ARG GPU_DRIVER=cuda +ARG TARGETPLATFORM="linux/amd64" +# unused but available +ARG BUILDPLATFORM -ENV ROOT=/InvokeAI -RUN git clone https://github.com/invoke-ai/InvokeAI.git ${ROOT} -WORKDIR ${ROOT} +WORKDIR ${INVOKEAI_SRC} +# Install pytorch before all other pip packages +# NOTE: there are no pytorch builds for arm64 + cuda, only cpu +# x86_64/CUDA is default RUN --mount=type=cache,target=/root/.cache/pip \ - git reset --hard f3b2e02921927d9317255b1c3811f47bd40a2bf9 && \ - pip install -e . + python3 -m venv ${VIRTUAL_ENV} &&\ + if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \ + extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \ + elif [ "$GPU_DRIVER" = "rocm" ]; then \ + extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.4.2"; \ + else \ + extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu118"; \ + fi &&\ + pip install $extra_index_url_arg \ + torch==$TORCH_VERSION \ + torchvision==$TORCHVISION_VERSION +RUN git clone https://github.com/invoke-ai/InvokeAI.git ${INVOKEAI_SRC} -ARG BRANCH=main SHA=f3b2e02921927d9317255b1c3811f47bd40a2bf9 +# Install the local package. +# Editable mode helps use the same image for development: +# the local working copy can be bind-mounted into the image +# at path defined by ${INVOKEAI_SRC} +#COPY invokeai ./invokeai +#COPY pyproject.toml ./ +#RUN cp ${INVOKEAI_SRC}/pyproject.toml ./ RUN --mount=type=cache,target=/root/.cache/pip \ - git fetch && \ - git reset --hard && \ - git checkout ${BRANCH} && \ - git reset --hard ${SHA} && \ - pip install -U -e . 
+ # xformers + triton fails to install on arm64 + if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \ + pip install -e ".[xformers]"; \ + else \ + pip install -e "."; \ + fi -RUN --mount=type=cache,target=/root/.cache/pip \ - --mount=type=bind,from=xformers,source=/wheel.whl,target=/xformers-0.0.21-cp310-cp310-linux_x86_64.whl \ - pip install -U opencv-python-headless triton /xformers-0.0.21-cp310-cp310-linux_x86_64.whl && \ - python3 -c "from patchmatch import patch_match" +# #### Build the Web UI ------------------------------------ + +ENV INVOKEAI_SRC=/opt/invokeai + +FROM node:18 AS web-builder +WORKDIR /build +COPY --from=builder /opt/invokeai/invokeai/frontend/web/ ./ +RUN --mount=type=cache,target=/usr/lib/node_modules \ + npm install --include dev +RUN --mount=type=cache,target=/usr/lib/node_modules \ + yarn vite build -COPY . /docker/ +#### Runtime stage --------------------------------------- -ENV NVIDIA_VISIBLE_DEVICES=all -ENV PYTHONUNBUFFERED=1 PRELOAD=false HF_HOME=/root/.cache/huggingface CONFIG_DIR=/data/config/invoke CLI_ARGS="" -EXPOSE 7860 +FROM library/ubuntu:22.04 AS runtime -ENTRYPOINT ["/docker/entrypoint.sh"] -CMD invokeai --web --host 0.0.0.0 --port 7860 --root_dir ${ROOT} --config ${CONFIG_DIR}/models.yaml \ - --outdir /output/invoke --embedding_directory /data/embeddings/ --lora_directory /data/models/Lora \ - --no-nsfw_checker --no-safety_checker ${CLI_ARGS} +ARG DEBIAN_FRONTEND=noninteractive +ENV PYTHONUNBUFFERED=1 +ENV PYTHONDONTWRITEBYTECODE=1 +RUN apt update && apt install -y --no-install-recommends \ + git \ + curl \ + vim \ + tmux \ + ncdu \ + iotop \ + bzip2 \ + gosu \ + libglib2.0-0 \ + libgl1-mesa-glx \ + python3-venv \ + python3-pip \ + build-essential \ + libopencv-dev \ + libstdc++-10-dev &&\ + apt-get clean && apt-get autoclean + +# globally add magic-wormhole +# for ease of transferring data to and from the container +# when running in sandboxed cloud environments; e.g. Runpod etc. 
+RUN pip install magic-wormhole
+
+ENV INVOKEAI_SRC=/opt/invokeai
+ENV VIRTUAL_ENV=/opt/venv/invokeai
+ENV INVOKEAI_ROOT=/invokeai
+ENV PATH="$VIRTUAL_ENV/bin:$INVOKEAI_SRC:$PATH"
+
+# --link requires buildkit w/ dockerfile syntax 1.4
+COPY --link --from=builder ${INVOKEAI_SRC} ${INVOKEAI_SRC}
+COPY --link --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
+COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist
+
+# Link amdgpu.ids for ROCm builds
+# contributed by https://github.com/Rubonnek
+RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
+    ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"
+
+WORKDIR ${INVOKEAI_SRC}
+
+# build patchmatch
+RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
+RUN python3 -c "from patchmatch import patch_match"
+
+# Create unprivileged user and make the local dir
+RUN useradd --create-home --shell /bin/bash -u 1000 --comment "container local user" invoke
+RUN mkdir -p ${INVOKEAI_ROOT} && chown -R invoke:invoke ${INVOKEAI_ROOT}
+
+# Create autoimport directories
+RUN mkdir -p ${INVOKEAI_ROOT}/autoimport/embedding
+RUN mkdir ${INVOKEAI_ROOT}/autoimport/main
+RUN mkdir ${INVOKEAI_ROOT}/autoimport/lora
+RUN mkdir ${INVOKEAI_ROOT}/autoimport/controlnet
+
+# AbdBarho file structure
+RUN mkdir -p ${INVOKEAI_ROOT}/models/core/upscaling
+RUN ln -s /data/models/Stable-diffusion/ ${INVOKEAI_ROOT}/autoimport/main/
+RUN ln -s /data/embeddings/ ${INVOKEAI_ROOT}/autoimport/embedding/
+RUN ln -s /data/models/Lora ${INVOKEAI_ROOT}/autoimport/lora/
+RUN ln -s /data/models/ControlNet ${INVOKEAI_ROOT}/autoimport/controlnet/
+RUN rm -rf ${INVOKEAI_ROOT}/models/core/upscaling/realesrgan && \
+    ln -s /data/models/RealESRGAN ${INVOKEAI_ROOT}/models/core/upscaling/realesrgan
+RUN rm -rf ${INVOKEAI_ROOT}/models/core/convert && \
+    ln -s /data/models/invoke/ ${INVOKEAI_ROOT}/models/core/convert
+
+#COPY docker/docker-entrypoint.sh ./
+RUN cp ${INVOKEAI_SRC}/docker/docker-entrypoint.sh ./
+ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"] +CMD ["invokeai-web", "--port", "7860", "--host", "0.0.0.0", "--outdir", "/output/invoke", "--conf_path", "/data/config/invoke/configs/models.yaml", "--db_dir", "/data/config/invoke/database"] From 62aa878b46b8f31b8b8fbe8473c52cebf70e99fd Mon Sep 17 00:00:00 2001 From: oddomatik Date: Fri, 25 Aug 2023 18:44:32 -0700 Subject: [PATCH 2/3] Comment cleanup --- services/invoke/Dockerfile | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/services/invoke/Dockerfile b/services/invoke/Dockerfile index db64004..2212781 100644 --- a/services/invoke/Dockerfile +++ b/services/invoke/Dockerfile @@ -45,14 +45,13 @@ RUN --mount=type=cache,target=/root/.cache/pip \ torchvision==$TORCHVISION_VERSION RUN git clone https://github.com/invoke-ai/InvokeAI.git ${INVOKEAI_SRC} +# Define specific hash here # Install the local package. # Editable mode helps use the same image for development: # the local working copy can be bind-mounted into the image # at path defined by ${INVOKEAI_SRC} -#COPY invokeai ./invokeai -#COPY pyproject.toml ./ -#RUN cp ${INVOKEAI_SRC}/pyproject.toml ./ + RUN --mount=type=cache,target=/root/.cache/pip \ # xformers + triton fails to install on arm64 if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \ @@ -130,7 +129,7 @@ RUN python3 -c "from patchmatch import patch_match" RUN useradd --create-home --shell /bin/bash -u 1000 --comment "container local user" invoke RUN mkdir -p ${INVOKEAI_ROOT} && chown -R invoke:invoke ${INVOKEAI_ROOT} -# Create autoimport directories +# Create autoimport directories for symlinks RUN mkdir -p ${INVOKEAI_ROOT}/autoimport/embedding RUN mkdir ${INVOKEAI_ROOT}/autoimport/main RUN mkdir ${INVOKEAI_ROOT}/autoimport/lora From f60d2b787e522b5303135834c04a5264d48ac9e2 Mon Sep 17 00:00:00 2001 From: oddomatik Date: Fri, 25 Aug 2023 23:14:55 -0700 Subject: [PATCH 3/3] link caching directory invokeai converts models into diffuser format and caches 
them. We don't want to have to regenerate this cache each time we rebuild container, but it will take a LOT of space on the local drive. --- services/invoke/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/services/invoke/Dockerfile b/services/invoke/Dockerfile index 2212781..79528f6 100644 --- a/services/invoke/Dockerfile +++ b/services/invoke/Dockerfile @@ -145,6 +145,7 @@ RUN rm -rf ${INVOKEAI_ROOT}/models/core/upscaling/realesrgan && \ ln -s /data/models/RealESRGAN ${INVOKEAI_ROOT}/models/core/upscaling/realesrgan RUN rm -rf ${INVOKEAI_ROOT}/models/core/convert && \ ln -s /data/models/invoke/ ${INVOKEAI_ROOT}/models/core/convert +RUN ln -s /data/config/invoke/.cache ${INVOKEAI_ROOT}/models/ #COPY docker/docker-entrypoint.sh ./ RUN cp ${INVOKEAI_SRC}/docker/docker-entrypoint.sh ./