10 changes: 10 additions & 0 deletions README.md
@@ -21,6 +21,7 @@ Our model checkpoints trained on [VITON-HD](https://github.com/shadow2496/VITON-
![workflow](images/workflow.png) 

## Installation
### Source
1. Clone the repository

```sh
@@ -36,6 +37,15 @@ pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2
pip install -r requirements.txt
```

### Docker
Make sure [docker](https://docs.docker.com/engine/install/ubuntu/) and [nvidia-docker2](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) are installed.
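
To confirm the GPU is visible from Docker before building, a quick sanity check (a minimal sketch; it uses the same CUDA base image as the provided dockerfile, and `make test` below runs an equivalent check against the built project image):

```sh
docker run --rm --gpus all nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04 nvidia-smi
```

Then build and deploy with the provided makefile: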
```sh
make build   # Build the Docker image
make test    # Test whether the built image can access CUDA
make deploy  # Deploy the ootd container
make exec    # Enter the ootd container (the software environment is already configured); download all necessary models into the checkpoints folder yourself, then follow the Inference section below
```
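
Once inside the container (`make exec`), a rough sketch of a half-body run, assuming the required checkpoints have already been placed in the checkpoints folder (this mirrors the `half` target of the makefile):

```sh
# run from /app inside the ootd container
python run_ootd.py --model_path ./examples/model/01008_00.jpg --cloth_path ./examples/garment/00055_00.jpg --scale 2.0 --sample 4
```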

## Inference
1. Half-body model

53 changes: 53 additions & 0 deletions dockerfile
@@ -0,0 +1,53 @@
# (Example: Ubuntu 22.04, Python 3.10, CUDA 11.8):
FROM nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04
# Update the apt package index
RUN apt-get update
# Set the working directory inside the container
WORKDIR /app

### Install Python 3.10.8 and pip
# Pre-set the timezone so tzdata does not prompt interactively during apt installs
ENV TZ=Asia/Shanghai
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

RUN apt-get install -y --no-install-recommends \
build-essential \
libffi-dev \
libssl-dev \
zlib1g-dev \
libbz2-dev \
libreadline-dev \
libsqlite3-dev \
wget \
curl \
llvm \
libncursesw5-dev \
tk-dev \
libxml2-dev \
libxmlsec1-dev \
liblzma-dev \
ca-certificates \
&& curl -O https://www.python.org/ftp/python/3.10.8/Python-3.10.8.tgz \
&& tar -xzf Python-3.10.8.tgz \
&& cd Python-3.10.8 \
&& ./configure --enable-optimizations \
&& make -j $(nproc) \
&& make altinstall \
&& cd .. \
&& rm -rf Python-3.10.8.tgz Python-3.10.8 \
&& ln -s /usr/local/bin/python3.10 /usr/local/bin/python \
&& ln -s /usr/local/bin/pip3.10 /usr/local/bin/pip \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

RUN pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 -i https://mirrors.aliyun.com/pypi/simple/ \
    && apt-get update && apt-get install -y libgl1-mesa-glx

# Copy the project source into /app
COPY . /app

RUN pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/





44 changes: 44 additions & 0 deletions makefile
@@ -0,0 +1,44 @@
all: help

help:
@echo ""
@echo " -- Help Menu: step by step"
@echo ""
@echo " 1. make build - build a image version"
@echo " 2. make test - Test whether the built image can enable CUDA"
@echo " 3. make deploy - Deploy ootd project"
@echo " 4. make exec - Enter the ootd container(The container has already configured the software environment), please download all necessary models in the checkpoints folder by yourself, follow the Inference in the readme"
@echo ""

username=st
BASE_IMAGE_NAME=nvidia/cuda:12.1.1-cudnn8-runtime-ubuntu22.04
NAME=ootd
VERSION=v0.0.2
CONTAINER_NAME=ootd
half:
@python run_ootd.py --model_path ./examples/model/01008_00.jpg --cloth_path ./examples/garment/00055_00.jpg --scale 2.0 --sample 4
exec:
@docker exec -it ${CONTAINER_NAME} /bin/bash

deploy:
@docker run -itd --gpus all \
--name ${CONTAINER_NAME} \
-p 7865:7865 \
-v $(PWD):/app \
${NAME}:${VERSION} /bin/bash
test:
@docker run -it --rm \
--gpus all \
--name test \
${NAME}:${VERSION} nvidia-smi
build:
@docker build -t ${NAME}:${VERSION} .

stop:
@docker stop ${CONTAINER_NAME}
clean: stop
@docker rm ${CONTAINER_NAME}

hello:
@echo "makefile,$(username), hello world !"