diff --git a/README.md b/README.md index 59887cc..57f0ede 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ https://github.com/TencentARC/InstantMesh/assets/20635237/dab3511e-e7c6-4c0b-bab - [x] Release inference and training code. - [x] Release model weights. - [x] Release huggingface gradio demo. Please try it at [demo](https://huggingface.co/spaces/TencentARC/InstantMesh) link. -- [ ] Add support to low-memory GPU environment. +- [x] Add support to low-memory GPU environment. - [ ] Add support to more multi-view diffusion models. # ⚙️ Dependencies and Installation diff --git a/app.py b/app.py index 032f112..c74140d 100644 --- a/app.py +++ b/app.py @@ -86,6 +86,7 @@ pipeline = DiffusionPipeline.from_pretrained( "sudo-ai/zero123plus-v1.2", custom_pipeline="zero123plus", torch_dtype=torch.float16, + cache_dir=model_cache_dir ) pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config( pipeline.scheduler.config, timestep_spacing='trailing' diff --git a/docker/README.md b/docker/README.md index 3062135..68646d0 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,13 +1,22 @@ # Docker setup -This docker setup is tested on WSL(Ubuntu). +This docker setup is tested on Ubuntu. make sure you are under directory yourworkspace/instantmesh/ -run +Build the docker image: -`docker build -t instantmesh/deploy:cuda12.1 -f docker/Dockerfile .` +```bash +docker build -t instantmesh -f docker/Dockerfile . +``` -then run +Run the docker image with a local model cache (so it is faster when the container is started next time): -`docker run --gpus all -it instantmesh/deploy:cuda12.1` \ No newline at end of file +```bash +mkdir -p $HOME/models/ +export MODEL_DIR=$HOME/models/ + +docker run -it -p 43839:43839 --platform=linux/amd64 --gpus all -v $MODEL_DIR:/workspace/instantmesh/models instantmesh +``` + +Navigate to `http://localhost:43839` to use the demo.