README #

Add user #

bash
sudo useradd -Ums /bin/bash -d /home/deploy deploy
#sudo passwd deploy

sudo chmod go+rx /home/deploy

echo 'deploy  ALL=(ALL:ALL) NOPASSWD: ALL' | sudo tee -a /etc/sudoers

sudo su deploy -c 'install -m 700 -d ~/.ssh; touch ~/.ssh/authorized_keys; chmod 600 ~/.ssh/authorized_keys'

echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA....' | \
    sudo tee -a /home/deploy/.ssh/authorized_keys

sudo su deploy -c 'touch ~/.bash_aliases'
echo -e "alias mv='mv -i'\nalias rm='rm -i'\nalias cp='cp -i'" | sudo tee -a /home/deploy/.bash_aliases
echo -e '\n. ~/.bash_aliases\n' | sudo tee -a /home/deploy/.bashrc

history -c
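
Appending to /etc/sudoers directly works, but a validated drop-in file is safer. A minimal sketch, assuming the drop-in name deploy-nopasswd:

bash
echo 'deploy  ALL=(ALL:ALL) NOPASSWD: ALL' | sudo tee /etc/sudoers.d/deploy-nopasswd
sudo chmod 440 /etc/sudoers.d/deploy-nopasswd
# Validate all sudoers files before logging out
sudo visudo -c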

Tools and configuration #

Change hostname #

bash
# Ubuntu
sudo hostnamectl hostname ai-t-1

# CentOS
sudo hostnamectl set-hostname ai-t-1
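
To confirm the change took effect:

bash
hostnamectl
hostname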

Update pkgs #

bash
# Ubuntu
sudo apt update && sudo apt upgrade

# CentOS
sudo yum -y update

Install tools #

bash
# Ubuntu
sudo apt install -y unzip nload iftop tree screen

# CentOS
sudo yum install -y unzip nload iftop tree screen

Configure screen #

bash
sudo cp -f /etc/screenrc /etc/screenrc.orig

echo '
altscreen on
hardstatus on
hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%<"
' | sudo tee -a /etc/screenrc

# Use screen
screen -dR
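
A few common screen operations for reference (the session name work is only an example):

bash
# Detach the current session with Ctrl-a d, then list sessions
screen -ls
# Attach to (or create) a named session
screen -S work -dR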

Configure TMOUT #

bash
echo '
if [ "$STY" != "" ]; then
  # Never auto logout in screen.
  unset TMOUT
else
  # User input timeout (seconds), 0 for never auto logout.
  export TMOUT=1800
  #readonly TMOUT
fi
' | sudo tee -a /etc/profile.d/autologout.sh
# /home/deploy/.bashrc
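
TMOUT only applies to new login shells; a quick check after opening a fresh SSH session (1800 matches the script above):

bash
# In a fresh login shell:
echo $TMOUT   # expect 1800
# Inside a screen session (screen -dR), the variable should be unset:
echo $TMOUT   # expect empty output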

Configure iftop #

bash
echo 'dns-resolution: no
port-resolution: no
port-display: on
use-bytes: yes
log-scale: yes' | sudo tee /root/.iftoprc
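
A typical invocation, assuming eth0 is the interface to monitor (adjust to the actual NIC name):

bash
sudo iftop -i eth0
# Or point iftop at an explicit config file
sudo iftop -c /root/.iftoprc -i eth0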

Upgrade python to 3.8 (DEPRECATED) #

bash
sudo add-apt-repository ppa:deadsnakes/ppa

sudo apt install -y python3.8

sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.8 40
sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.6 30

python3 -V
pip3 -V

# Fix apt: ModuleNotFoundError: No module named 'apt_pkg'
cd /usr/lib/python3/dist-packages
sudo ln -s apt_pkg.cpython-36m-x86_64-linux-gnu.so apt_pkg.so
# Verify apt
sudo apt install

Install docker #

Docs

Install on Ubuntu 22.04 (LTS) #

Docs

bash
# Update the apt package index and install packages to allow apt to use a repository over HTTPS
sudo apt-get update
sudo apt-get install -y ca-certificates curl gnupg

# Add Docker's official GPG key
sudo install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg

# Use the following command to set up the repository
echo \
    "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
    sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

sudo apt-get update

# To install the latest version, run:
sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
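
A quick sanity check that the engine and plugins installed correctly:

bash
sudo docker version
sudo docker compose version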

Install on CentOS 7/8/9 #

bash
sudo yum install -y yum-utils
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

sudo yum -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
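
On CentOS the daemon is not started automatically after installation; start it before the post-install steps:

bash
sudo systemctl start docker
sudo docker version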

Post install #

Start on boot #

bash
# Configure to start on boot
sudo systemctl enable docker.service
sudo systemctl enable containerd.service

registry-mirrors and log-driver #

/etc/docker/daemon.json:

json
{
  "registry-mirrors": [
    "https://docker.m.daocloud.io"
  ],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "10m",
    "max-file": "1"
  }
}
bash
sudo systemctl daemon-reload
sudo systemctl restart docker.service
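
To confirm the daemon picked up both settings (exact output varies by Docker version):

bash
sudo docker info --format '{{.LoggingDriver}}'   # expect json-file
sudo docker info | grep -A 1 'Registry Mirrors'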

Run as non-root user #

https://docs.docker.com/engine/install/linux-postinstall/#manage-docker-as-a-non-root-user

bash
# Requires re-login after joining group:
#sudo groupadd docker
sudo usermod -aG docker $USER
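
To pick up the new group membership in the current shell without re-logging (per the linked docs), something like:

bash
newgrp docker
docker run hello-world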

Test docker #

bash
sudo docker run hello-world

Nvidia driver (Optional) #

Show nvidia driver installed #

bash
nvidia-smi
# Check output: CUDA Version >= 11.3
text
Mon Sep  4 14:51:37 2023
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 470.182.03   Driver Version: 470.182.03   CUDA Version: 11.4     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|                               |                      |               MIG M. |
|===============================+======================+======================|
|   0  Tesla T4            On   | 00000000:00:08.0 Off |                  Off |
| N/A   29C    P8     9W /  70W |      0MiB / 16127MiB |      0%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+

+-----------------------------------------------------------------------------+
| Processes:                                                                  |
|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
|        ID   ID                                                   Usage      |
|=============================================================================|
|  No running processes found                                                 |
+-----------------------------------------------------------------------------+

List PCI devices #

bash
lspci
text
... ...
00:08.0 3D controller: NVIDIA Corporation TU104GL [Tesla T4] (rev a1)
... ...

Auto-install nvidia drivers #

Ref - Blog

bash
# List nvidia drivers
sudo ubuntu-drivers devices
text
ERROR:root:aplay command not found
== /sys/devices/pci0000:00/0000:00:08.0 ==
modalias : pci:v000010DEd00001EB8sv000010DEsd000012A2bc03sc02i00
vendor   : NVIDIA Corporation
model    : TU104GL [Tesla T4]
manual_install: True
driver   : nvidia-driver-450-server - distro non-free
driver   : nvidia-driver-418-server - distro non-free
driver   : nvidia-driver-535-server - distro non-free
driver   : nvidia-driver-535 - distro non-free recommended
driver   : nvidia-driver-525 - distro non-free
driver   : nvidia-driver-525-server - distro non-free
driver   : nvidia-driver-470 - distro non-free
driver   : nvidia-driver-470-server - distro non-free
driver   : xserver-xorg-video-nouveau - distro free builtin
bash
# Auto install nvidia driver
sudo ubuntu-drivers autoinstall
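
If autoinstall picks an unwanted version, one of the drivers listed above can be installed explicitly instead (nvidia-driver-535-server is only an example); a reboot is usually needed before nvidia-smi reports the new driver:

bash
sudo apt install -y nvidia-driver-535-server
sudo reboot
# After reboot:
nvidia-smi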

Ref - Download

Nvidia docker2 #

Ref - Blog

Ref - Github

Docs - Install

Install nvidia-container-toolkit #

For Ubuntu (apt):

bash
distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \
    && curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
    && curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list | \
        sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
        sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
bash
sudo apt-get update
sudo apt-get install -y nvidia-docker2
#sudo nvidia-ctk runtime configure --runtime=docker --set-as-default
sudo nvidia-ctk runtime configure --runtime=docker
sudo systemctl restart docker
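
nvidia-ctk writes the runtime entry into /etc/docker/daemon.json; a quick check that it was registered:

bash
cat /etc/docker/daemon.json
sudo docker info | grep -i runtimes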

Check docker nvidia runtime #

bash
#sudo docker run --rm --runtime=nvidia --gpus all nvidia/cuda:12.2.0-base-ubuntu22.04 nvidia-smi
sudo docker run --rm --runtime=nvidia --gpus all nvidia/cuda:12.2.0-devel-ubuntu22.04 nvidia-smi
sudo docker run --rm --runtime=nvidia --gpus all nvidia/cuda:12.2.0-devel-ubuntu22.04 nvcc --version
text
Mon Sep  4 08:04:28 2023
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 470.182.03   Driver Version: 470.182.03   CUDA Version: 12.2     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|                               |                      |               MIG M. |
|===============================+======================+======================|
|   0  Tesla T4            On   | 00000000:00:08.0 Off |                  Off |
| N/A   29C    P8     9W /  70W |      0MiB / 16127MiB |      0%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+

+-----------------------------------------------------------------------------+
| Processes:                                                                  |
|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
|        ID   ID                                                   Usage      |
|=============================================================================|
|  No running processes found                                                 |
+-----------------------------------------------------------------------------+

Test the container environment #

bash
# Cuda
sudo docker run -it --name testing --runtime=nvidia --gpus all nvidia/cuda:12.2.0-devel-ubuntu22.04 bash

# Check glibc version
ldd --version

# Init conda for bash (optional)
conda init bash

# Test Miniconda3
conda list

# PyTorch (glibc >=2.17)
# https://pytorch.org/get-started/locally/#linux-pip
pip3 install torch torchvision torchaudio

# torch-2.0.1
# Download https://download.pytorch.org/whl/cu118/torch-2.0.1%2Bcu118-cp310-cp310-linux_x86_64.whl

# (Optional) BasicSR
# https://github.com/XPixelGroup/BasicSR/blob/master/requirements.txt

# Test package from launch.py (webui)
python3 -c 'import importlib.util; print(importlib.util.find_spec("clip"));'
python3 -c 'import importlib.util; print(importlib.util.find_spec("xformers"));'
python3 -c 'import importlib.util; print(importlib.util.find_spec("lpips"));'
python3 -c 'import torch; assert torch.cuda.is_available()'

# Install Miniconda3 (glibc >=2.26)
# https://docs.conda.io/projects/miniconda/en/latest/index.html#quick-command-line-install
cd /home/deploy/ai-project/docker
bash ./Miniconda3-latest-Linux-x86_64.sh

# Debug sd_module.py
./cmd.sh
sed -i -E '/clip_is_included_into_sd = any/{s/$/\n    print(clip_is_included_into_sd)/}' ./modules/sd_models.py
python3 -u webui.py --listen

Downloads #

stable-diffusion-webui #

bash
# [extract] ./
git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
git -C stable-diffusion-webui checkout 5ef669de080814067961f28357256e8fe27544f4
tar -czf stable-diffusion-webui.tar.gz stable-diffusion-webui
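
The [extract] marker means the tarball is unpacked at the webui root on the target host. A sketch, assuming /usr/local as the install prefix (matching the BLIP path further below):

bash
tar -xzf stable-diffusion-webui.tar.gz -C /usr/local/
ls /usr/local/stable-diffusion-webui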

torch #

bash
# [pip] torch-2.0.1+cu118-cp310-cp310-linux_x86_64.whl
wget https://download.pytorch.org/whl/cu118/torch-2.0.1%2Bcu118-cp310-cp310-linux_x86_64.whl

# [copy] ./models/torch_deepdanbooru/model-resnet_custom_v3.pt
wget https://github.com/AUTOMATIC1111/TorchDeepDanbooru/releases/download/v1/model-resnet_custom_v3.pt

clip and open_clip #

bash
# [pip] clip.zip
wget -O clip.zip https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip

# [pip] open_clip.zip
wget -O open_clip.zip https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip

# [copy] ./openai/clip-vit-large-patch14/
GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/openai/clip-vit-large-patch14 openai--clip-vit-large-patch14
git -C openai--clip-vit-large-patch14 config --local http.proxy http://127.0.0.1:7890
git -C openai--clip-vit-large-patch14 config --local https.proxy http://127.0.0.1:7890
git -C openai--clip-vit-large-patch14 lfs pull
mv openai--clip-vit-large-patch14/.git openai--clip-vit-large-patch14--git

stable-diffusion-stability-ai #

bash
# [extract] ./repositories/stable-diffusion-stability-ai/
git clone https://github.com/Stability-AI/stablediffusion.git stable-diffusion-stability-ai
git -C stable-diffusion-stability-ai checkout cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf
tar -czf stable-diffusion-stability-ai.tar.gz stable-diffusion-stability-ai

generative-models #

bash
# [extract] ./repositories/generative-models/
git clone https://github.com/Stability-AI/generative-models.git generative-models
git -C generative-models checkout 45c443b316737a4ab6e40413d7794a7f5657c19f
tar -czf generative-models.tar.gz generative-models

k-diffusion #

bash
# [extract] ./repositories/k-diffusion/
git clone https://github.com/crowsonkb/k-diffusion.git k-diffusion
git -C k-diffusion checkout ab527a9a6d347f364e3d185ba6d714e22d80cb3c
tar -czf k-diffusion.tar.gz k-diffusion

CodeFormer #

bash
# [extract] ./repositories/CodeFormer/
git clone https://github.com/sczhou/CodeFormer.git CodeFormer
git -C CodeFormer checkout c5b4593074ba6214284d6acd5f1719b6c5d739af
tar -czf CodeFormer.tar.gz CodeFormer

BLIP #

bash
# [extract] ./repositories/BLIP/
git clone https://github.com/salesforce/BLIP.git BLIP
git -C BLIP checkout 48211a1594f1321b00f14c9f7a5b4813144b2fb9
tar -czf BLIP.tar.gz BLIP

# [copy] ./models/BLIP/model_base_caption_capfilt_large.pth
wget https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth
# Target path: /usr/local/stable-diffusion-webui/models/BLIP/model_base_caption_capfilt_large.pth

GFPGAN #

bash
# [extract] ./repositories/GFPGAN/
git clone https://gitclone.com/github.com/TencentARC/GFPGAN
tar -czf GFPGAN.tar.gz GFPGAN

# [copy] ./models/GFPGAN/GFPGANv1.4.pth
wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth

# [copy] ??? face_detection_yunet.onnx ???
wget -O face_detection_yunet.onnx 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true'

taming-transformers #

bash
# [extract] ./repositories/taming-transformers/
git clone https://gitclone.com/github.com/CompVis/taming-transformers
mv taming-transformers/.git taming-transformers--git
tar -czf taming-transformers.tar.gz taming-transformers

sd-webui-controlnet #

bash
# [extract] ./extensions/sd-webui-controlnet/
git clone https://github.com/Mikubill/sd-webui-controlnet.git
tar -czf sd-webui-controlnet.tar.gz sd-webui-controlnet

# [copy] ./extensions/sd-webui-controlnet/models/sai_xl_canny_256lora.safetensors
# Find more models: https://huggingface.co/lllyasviel/sd_control_collection/resolve/main/
# Compare models: https://www.bilibili.com/read/cv26510303/
wget https://huggingface.co/lllyasviel/sd_control_collection/resolve/main/sai_xl_canny_256lora.safetensors

# [copy] ./extensions/sd-webui-controlnet/annotator/downloads/pidinet/table5_pidinet.pth
wget https://huggingface.co/lllyasviel/Annotators/resolve/main/table5_pidinet.pth

# [copy] ./extensions/sd-webui-controlnet/annotator/downloads/openpose/body_pose_model.pth
wget https://huggingface.co/lllyasviel/Annotators/resolve/main/body_pose_model.pth

# [copy] ./extensions/sd-webui-controlnet/annotator/downloads/oneformer/250_16_swin_l_oneformer_ade20k_160k.pth
wget https://huggingface.co/lllyasviel/Annotators/resolve/main/250_16_swin_l_oneformer_ade20k_160k.pth

# [copy] ./extensions/sd-webui-controlnet/annotator/downloads/midas/dpt_hybrid-midas-501f0c75.pt
wget https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt

# [copy] ./extensions/sd-webui-controlnet/annotator/downloads/normal_bae/scannet.pt
wget https://huggingface.co/lllyasviel/Annotators/resolve/main/scannet.pt

# [copy] ./extensions/sd-webui-controlnet/annotator/downloads/openpose/hand_pose_model.pth
wget https://huggingface.co/lllyasviel/Annotators/resolve/main/hand_pose_model.pth

# [copy] ./extensions/sd-webui-controlnet/annotator/downloads/openpose/facenet.pth
wget https://huggingface.co/lllyasviel/Annotators/resolve/main/facenet.pth

# [copy] ./extensions/sd-webui-controlnet/annotator/downloads/openpose/yolox_l.onnx
wget https://huggingface.co/yzd-v/DWPose/resolve/main/yolox_l.onnx

# [copy] ./extensions/sd-webui-controlnet/annotator/downloads/mlsd/mlsd_large_512_fp32.pth
wget https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/mlsd_large_512_fp32.pth

# [copy] ./extensions/sd-webui-controlnet/annotator/downloads/clip_vision/clip_g.pth
wget https://huggingface.co/lllyasviel/Annotators/resolve/main/clip_g.pth

# [copy] ./extensions/sd-webui-controlnet/annotator/downloads/clip_vision/clip_h.pth
wget -O clip_h.pth https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/pytorch_model.bin

# [copy] ./extensions/sd-webui-controlnet/annotator/downloads/clip_vision/clip_vitl.pth
wget -O clip_vitl.pth https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/pytorch_model.bin

# [copy] ./extensions/sd-webui-controlnet/annotator/downloads/leres/res101.pth
wget https://huggingface.co/lllyasviel/Annotators/resolve/main/res101.pth

# [copy] ./extensions/sd-webui-controlnet/annotator/downloads/leres/latest_net_G.pth
wget https://huggingface.co/lllyasviel/Annotators/resolve/main/latest_net_G.pth

# [copy] ./extensions/sd-webui-controlnet/annotator/downloads/zoedepth/ZoeD_M12_N.pt
wget https://huggingface.co/lllyasviel/Annotators/resolve/main/ZoeD_M12_N.pt

# [copy] ./extensions/sd-webui-controlnet/annotator/downloads/lineart/sk_model2.pth
wget https://huggingface.co/lllyasviel/Annotators/resolve/main/sk_model2.pth

sd-webui-prompt-all-in-one #

bash
# [extract] ./extensions/sd-webui-prompt-all-in-one/
git clone https://github.com/Physton/sd-webui-prompt-all-in-one.git
tar -czf sd-webui-prompt-all-in-one.tar.gz sd-webui-prompt-all-in-one

sd-webui-segment-anything #

bash
# [extract] ./extensions/sd-webui-segment-anything/
# Install guide: https://github.com/continue-revolution/sd-webui-segment-anything
git clone https://github.com/continue-revolution/sd-webui-segment-anything.git
tar -czf sd-webui-segment-anything.tar.gz sd-webui-segment-anything

# [copy] ./models/sam/sam_vit_h_4b8939.pth
# Find more models: https://github.com/continue-revolution/sd-webui-segment-anything
wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth

# [extract] ./bert-base-uncased/
# Solve the issue: https://github.com/continue-revolution/sd-webui-segment-anything/issues/138
GIT_LFS_SKIP_SMUDGE=1 proxychains4 git clone https://huggingface.co/bert-base-uncased
git -C bert-base-uncased config --local http.proxy http://127.0.0.1:7890
git -C bert-base-uncased config --local https.proxy http://127.0.0.1:7890
git -C bert-base-uncased lfs pull
mv bert-base-uncased/.git bert-base-uncased--git

# [pip] GroundingDINO.zip
git clone https://github.com/IDEA-Research/GroundingDINO
zip -r GroundingDINO.zip GroundingDINO

# [copy] ./extensions/sd-webui-segment-anything/models/grounding-dino/
GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/ShilongLiu/GroundingDINO grounding-dino
git -C grounding-dino config --local http.proxy http://127.0.0.1:7890
git -C grounding-dino config --local https.proxy http://127.0.0.1:7890
git -C grounding-dino lfs pull
mv grounding-dino/.git grounding-dino--git
# Fix a bug in sd-webui-segment-anything/scripts/dino.py at line 20
cp grounding-dino/GroundingDINO_SwinT_OGC.cfg.py grounding-dino/GroundingDINO_SwinT_OGC.py

sd-webui-openpose-editor #

bash
# [extract] ./extensions/sd-webui-openpose-editor/
git clone https://github.com/huchenlei/sd-webui-openpose-editor
tar -czf sd-webui-openpose-editor.tar.gz sd-webui-openpose-editor

# [extract] ./extensions/sd-webui-openpose-editor/dist/
wget -O sd-webui-openpose-editor--dist.zip https://github.com/huchenlei/sd-webui-openpose-editor/releases/download/v0.2.1/dist.zip

# [copy] ./models/Stable-diffusion/ChilloutMix-ni-fp16.safetensors
wget https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/webui/ChilloutMix-ni-fp16.safetensors

# [copy] ./models/Stable-diffusion/SDXL_1.0_ArienMixXL_v2.0.safetensors
wget https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/webui/SDXL_1.0_ArienMixXL_v2.0.safetensors

# [copy] ./models/ControlNet/control_v11p_sd15_openpose.pth
wget https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/webui/control_v11p_sd15_openpose.pth

# [copy] ./models/ControlNet/control_v11p_sd15_canny.pth
wget https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/webui/control_v11p_sd15_canny.pth

# [copy] ./models/ControlNet/control_v11f1e_sd15_tile.pth
wget https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/webui/control_v11f1e_sd15_tile.pth

# [copy] ./models/ControlNet/control_sd15_random_color.pth
wget https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/webui/control_sd15_random_color.pth


# models/Lora/
wget https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/webui/FilmVelvia3.safetensors

wget https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/webui/vae-ft-mse-840000-ema-pruned.ckpt

wget https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/webui/face_skin.pth


sed -i 's/logging.basicConfig(level=logging.DEBUG/logging.basicConfig(level=logging.INFO/' ./extensions/sd-webui-openpose-editor/scripts/easyphoto_utils.py

stable-diffusion-webui-localization-zh_CN #

bash
# [extract] ./extensions/stable-diffusion-webui-localization-zh_CN/
git clone https://github.com/dtlnor/stable-diffusion-webui-localization-zh_CN
tar -czf stable-diffusion-webui-localization-zh_CN.tar.gz stable-diffusion-webui-localization-zh_CN

TODO #

bash
# container diff
# volumes/openai--clip-vit-large-patch14  -->  volumes/openai/clip-vit-large-patch14
# Edit Dockerfile run.env
# docker run:
#   restart, mount-volumes, max-memory, mutable-entrypoint-script, api-port and web-port

References #

Ref: stable-diffusion-xl #

https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main

https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/tree/main

Ref: PyTorch #

https://pytorch.org/get-started/locally/

https://download.pytorch.org/whl/torch/

August 5, 2025