From 5608b9716e16af2cfb1d908c459c0df19d5f254e Mon Sep 17 00:00:00 2001 From: VinMing Date: Mon, 18 Mar 2024 13:46:24 +0800 Subject: [PATCH] add Dockerfile --- README.md | 10 ++++++++++ dockerfile | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ makefile | 44 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 107 insertions(+) create mode 100644 dockerfile create mode 100644 makefile diff --git a/README.md b/README.md index 950f15c..305d784 100644 --- a/README.md +++ b/README.md @@ -21,6 +21,7 @@ Our model checkpoints trained on [VITON-HD](https://github.com/shadow2496/VITON- ![workflow](images/workflow.png)  ## Installation +### Source 1. Clone the repository ```sh @@ -36,6 +37,15 @@ pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 pip install -r requirements.txt ``` +### Docker +make sure to install [docker](https://docs.docker.com/engine/install/ubuntu/) and [nvidia-docker2](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) +```sh +make build # build an image +make test # Test whether the built image can enable CUDA +make deploy # Deploy ootd project +make exec # Enter the ootd container (configure the environment + download all necessary models in the checkpoints folder by yourself), follow the Inference in the readme +``` + ## Inference 1. 
Half-body model

diff --git a/dockerfile b/dockerfile
new file mode 100644
index 0000000..cc91974
--- /dev/null
+++ b/dockerfile
@@ -0,0 +1,53 @@
+# syntax=docker/dockerfile:1
+# Base: Ubuntu 22.04 + CUDA 11.8 runtime; Python 3.10.8 is built from source below.
+FROM nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04
+
+# Work from /app inside the container (created automatically by WORKDIR).
+WORKDIR /app
+
+# Fixed timezone so tzdata installs without an interactive prompt.
+ENV TZ=Asia/Shanghai
+RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
+
+# Build Python 3.10.8 and pip from source. `apt-get update` is chained into the
+# same layer as `install` so the package index can never be stale (hadolint
+# DL3009), and the apt lists are removed in the same layer to keep the image small.
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    build-essential \
+    ca-certificates \
+    curl \
+    libbz2-dev \
+    libffi-dev \
+    liblzma-dev \
+    libncursesw5-dev \
+    libreadline-dev \
+    libsqlite3-dev \
+    libssl-dev \
+    libxml2-dev \
+    libxmlsec1-dev \
+    llvm \
+    tk-dev \
+    wget \
+    zlib1g-dev \
+    && curl -O https://www.python.org/ftp/python/3.10.8/Python-3.10.8.tgz \
+    && tar -xzf Python-3.10.8.tgz \
+    && cd Python-3.10.8 \
+    && ./configure --enable-optimizations \
+    && make -j $(nproc) \
+    && make altinstall \
+    && cd .. \
+    && rm -rf Python-3.10.8.tgz Python-3.10.8 \
+    && ln -s /usr/local/bin/python3.10 /usr/local/bin/python \
+    && ln -s /usr/local/bin/pip3.10 /usr/local/bin/pip \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# PyTorch (CUDA 11.8 wheels) plus libGL, which OpenCV needs at runtime.
+# --no-cache-dir keeps the pip download cache out of the image layer (DL3042).
+RUN pip install --no-cache-dir torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 -i https://mirrors.aliyun.com/pypi/simple/ \
+    && apt-get update && apt-get install -y --no-install-recommends libgl1-mesa-glx \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy the project source into the image.
+COPY . /app
+
+RUN pip install --no-cache-dir -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
diff --git a/makefile b/makefile
new file mode 100644
index 0000000..eb77593
--- /dev/null
+++ b/makefile
@@ -0,0 +1,54 @@
+# Build/run helpers for the ootd Docker image.
+all: help
+
+# Declare every non-file target phony so a stray file named `test`/`build`/etc.
+# cannot shadow it.
+.PHONY: all help half exec deploy test build stop clean hello
+
+help:
+	@echo ""
+	@echo " -- Help Menu: step by step"
+	@echo ""
+	@echo " 1. make build - build an image"
+	@echo " 2. make test - Test whether the built image can enable CUDA"
+	@echo " 3. make deploy - Deploy ootd project"
+	@echo " 4. make exec - Enter the ootd container(The container has already configured the software environment), please download all necessary models in the checkpoints folder by yourself, follow the Inference in the readme"
+	@echo ""
+
+username=st
+# NOTE(review): BASE_IMAGE_NAME is unused, and its CUDA version (12.1.1)
+# differs from the dockerfile's 11.8.0 base -- confirm before relying on it.
+BASE_IMAGE_NAME=nvidia/cuda:12.1.1-cudnn8-runtime-ubuntu22.04
+NAME=ootd
+VERSION=v0.0.2
+CONTAINER_NAME=ootd
+
+# Run the half-body demo (inside the container).
+half:
+	@python run_ootd.py --model_path ./examples/model/01008_00.jpg --cloth_path ./examples/garment/00055_00.jpg --scale 2.0 --sample 4
+
+exec:
+	@docker exec -it ${CONTAINER_NAME} /bin/bash
+
+# $(PWD) instead of `.`: relative bind-mount paths are rejected by older Docker CLIs.
+deploy:
+	@docker run -itd --gpus all \
+		--name ${CONTAINER_NAME} \
+		-p 7865:7865 \
+		-v $(PWD):/app \
+		${NAME}:${VERSION} /bin/bash
+
+test:
+	@docker run -it --rm \
+		--gpus all \
+		--name test \
+		${NAME}:${VERSION} nvidia-smi
+
+build:
+	@docker build -t ${NAME}:${VERSION} .
+
+stop:
+	@docker stop ${CONTAINER_NAME}
+
+clean: stop
+	@docker rm ${CONTAINER_NAME}
+
+hello:
+	@echo "makefile,$(username), hello world !"