John6666 committed on
Commit
5a49af7
1 Parent(s): 83c14cb

Upload 4 files

Browse files
Files changed (4) hide show
  1. README.md +13 -14
  2. app.py +114 -114
  3. demo/animate.py +1 -1
  4. requirements.txt +124 -127
README.md CHANGED
@@ -1,15 +1,14 @@
1
- ---
2
- title: MagicAnimate
3
- emoji: 💃
4
- colorFrom: purple
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 4.10.0
8
- python_version: 3.11
9
- app_file: app.py
10
- models:
11
- - zcxu-eric/MagicAnimate
12
- - runwayml/stable-diffusion-v1-5
13
- - stabilityai/sd-vae-ft-mse
14
- pinned: false
15
  ---
 
1
+ ---
2
+ title: MagicAnimate
3
+ emoji: 💃
4
+ colorFrom: purple
5
+ colorTo: purple
6
+ sdk: gradio
7
+ sdk_version: 4.43.0
8
+ app_file: app.py
9
+ models:
10
+ - zcxu-eric/MagicAnimate
11
+ - runwayml/stable-diffusion-v1-5
12
+ - stabilityai/sd-vae-ft-mse
13
+ pinned: false
 
14
  ---
app.py CHANGED
@@ -1,114 +1,114 @@
1
- # Copyright 2023 ByteDance and/or its affiliates.
2
- #
3
- # Copyright (2023) MagicAnimate Authors
4
- #
5
- # ByteDance, its affiliates and licensors retain all intellectual
6
- # property and proprietary rights in and to this material, related
7
- # documentation and any modifications thereto. Any use, reproduction,
8
- # disclosure or distribution of this material and related documentation
9
- # without an express license agreement from ByteDance or
10
- # its affiliates is strictly prohibited.
11
- import argparse
12
- import imageio
13
- import numpy as np
14
- import gradio as gr
15
- import os
16
- from PIL import Image
17
- from subprocess import PIPE, run
18
-
19
- from demo.animate import MagicAnimate
20
-
21
- from huggingface_hub import snapshot_download
22
-
23
- snapshot_download(repo_id="stable-diffusion-v1-5/stable-diffusion-v1-5", local_dir="./stable-diffusion-v1-5", ignore_patterns=["*.safetensors"])
24
- snapshot_download(repo_id="stabilityai/sd-vae-ft-mse", local_dir="./sd-vae-ft-mse")
25
- snapshot_download(repo_id="zcxu-eric/MagicAnimate", local_dir="./MagicAnimate")
26
-
27
- is_spaces = True if "SPACE_ID" in os.environ else False
28
- true_for_shared_ui = False #This will be true only if you are in a shared UI
29
- if(is_spaces):
30
- true_for_shared_ui = True if "zcxu-eric/magicanimate" in os.environ['SPACE_ID'] else False
31
-
32
-
33
- animator = MagicAnimate()
34
-
35
- def animate(reference_image, motion_sequence_state, seed=1, steps=25, guidance_scale=7.5):
36
- return animator(reference_image, motion_sequence_state, seed, steps, guidance_scale)
37
-
38
- with gr.Blocks() as demo:
39
-
40
- gr.HTML(
41
- """
42
- <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
43
- <a href="https://github.com/magic-research/magic-animate" style="margin-right: 20px; text-decoration: none; display: flex; align-items: center;">
44
- </a>
45
- <div>
46
- <h1 >MagicAnimate: Temporally Consistent Human Image Animation using Diffusion Model</h1>
47
- <h5 style="margin: 0;">If you like our project, please give us a star ✨ on Github for the latest update.</h5>
48
- <div style="display: flex; justify-content: center; align-items: center; text-align: center;>
49
- <a href="https://arxiv.org/abs/2311.16498"><img src="https://img.shields.io/badge/Arxiv-2311.16498-red"></a>
50
- <a href='https://showlab.github.io/magicanimate'><img src='https://img.shields.io/badge/Project_Page-MagicAnimate-green' alt='Project Page'></a>
51
- <a href='https://github.com/magic-research/magic-animate'><img src='https://img.shields.io/badge/Github-Code-blue'></a>
52
- </div>
53
- </div>
54
- </div>
55
- """)
56
- animation = gr.Video(format="mp4", label="Animation Results", autoplay=True)
57
-
58
- with gr.Row():
59
- reference_image = gr.Image(label="Reference Image")
60
- motion_sequence = gr.Video(format="mp4", label="Motion Sequence",max_length=5)
61
-
62
- with gr.Column():
63
- random_seed = gr.Textbox(label="Random seed", value=1, info="default: -1")
64
- sampling_steps = gr.Textbox(label="Sampling steps", value=25, info="default: 25")
65
- guidance_scale = gr.Textbox(label="Guidance scale", value=7.5, info="default: 7.5")
66
- submit = gr.Button("Animate")
67
-
68
- def read_video(video):
69
- reader = imageio.get_reader(video)
70
- fps = reader.get_meta_data()['fps']
71
- return video
72
-
73
- def read_image(image, size=512):
74
- return np.array(Image.fromarray(image).resize((size, size)))
75
-
76
- # when user uploads a new video
77
- motion_sequence.upload(
78
- read_video,
79
- motion_sequence,
80
- motion_sequence,
81
- queue=False
82
- )
83
- # when `first_frame` is updated
84
- reference_image.upload(
85
- read_image,
86
- reference_image,
87
- reference_image,
88
- queue=False
89
- )
90
- # when the `submit` button is clicked
91
- submit.click(
92
- animate,
93
- [reference_image, motion_sequence, random_seed, sampling_steps, guidance_scale],
94
- animation
95
- )
96
-
97
- # Examples
98
- gr.Markdown("## Examples")
99
- gr.Examples(
100
- fn=animate,
101
- examples=[
102
- ["inputs/applications/source_image/monalisa.png", "inputs/applications/driving/densepose/running.mp4"],
103
- ["inputs/applications/source_image/demo4.png", "inputs/applications/driving/densepose/demo4.mp4"],
104
- ["inputs/applications/source_image/dalle2.jpeg", "inputs/applications/driving/densepose/running2.mp4"],
105
- ["inputs/applications/source_image/dalle8.jpeg", "inputs/applications/driving/densepose/dancing2.mp4"],
106
- ["inputs/applications/source_image/multi1_source.png", "inputs/applications/driving/densepose/multi_dancing.mp4"],
107
- ],
108
- inputs=[reference_image, motion_sequence],
109
- outputs=animation,
110
- cache_examples=true_for_shared_ui
111
- )
112
-
113
- # demo.queue(max_size=15, api_open=False)
114
- demo.launch(share=True, show_api=False)
 
1
+ # Copyright 2023 ByteDance and/or its affiliates.
2
+ #
3
+ # Copyright (2023) MagicAnimate Authors
4
+ #
5
+ # ByteDance, its affiliates and licensors retain all intellectual
6
+ # property and proprietary rights in and to this material, related
7
+ # documentation and any modifications thereto. Any use, reproduction,
8
+ # disclosure or distribution of this material and related documentation
9
+ # without an express license agreement from ByteDance or
10
+ # its affiliates is strictly prohibited.
11
+ import argparse
12
+ import imageio
13
+ import numpy as np
14
+ import gradio as gr
15
+ import os
16
+ from PIL import Image
17
+ from subprocess import PIPE, run
18
+
19
+ from demo.animate import MagicAnimate
20
+
21
+ from huggingface_hub import snapshot_download
22
+
23
+ snapshot_download(repo_id="stable-diffusion-v1-5/stable-diffusion-v1-5", local_dir="./stable-diffusion-v1-5", ignore_patterns=["*.safetensors"])
24
+ snapshot_download(repo_id="stabilityai/sd-vae-ft-mse", local_dir="./sd-vae-ft-mse")
25
+ snapshot_download(repo_id="zcxu-eric/MagicAnimate", local_dir="./MagicAnimate")
26
+
27
+ is_spaces = True if "SPACE_ID" in os.environ else False
28
+ true_for_shared_ui = False #This will be true only if you are in a shared UI
29
+ if(is_spaces):
30
+ true_for_shared_ui = True if "zcxu-eric/magicanimate" in os.environ['SPACE_ID'] else False
31
+
32
+
33
+ animator = MagicAnimate()
34
+
35
+ def animate(reference_image, motion_sequence_state, seed=1, steps=25, guidance_scale=7.5):
36
+ return animator(reference_image, motion_sequence_state, seed, steps, guidance_scale)
37
+
38
+ with gr.Blocks() as demo:
39
+
40
+ gr.HTML(
41
+ """
42
+ <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
43
+ <a href="https://github.com/magic-research/magic-animate" style="margin-right: 20px; text-decoration: none; display: flex; align-items: center;">
44
+ </a>
45
+ <div>
46
+ <h1 >MagicAnimate: Temporally Consistent Human Image Animation using Diffusion Model</h1>
47
+ <h5 style="margin: 0;">If you like our project, please give us a star ✨ on Github for the latest update.</h5>
48
+ <div style="display: flex; justify-content: center; align-items: center; text-align: center;>
49
+ <a href="https://arxiv.org/abs/2311.16498"><img src="https://img.shields.io/badge/Arxiv-2311.16498-red"></a>
50
+ <a href='https://showlab.github.io/magicanimate'><img src='https://img.shields.io/badge/Project_Page-MagicAnimate-green' alt='Project Page'></a>
51
+ <a href='https://github.com/magic-research/magic-animate'><img src='https://img.shields.io/badge/Github-Code-blue'></a>
52
+ </div>
53
+ </div>
54
+ </div>
55
+ """)
56
+ animation = gr.Video(format="mp4", label="Animation Results", autoplay=True)
57
+
58
+ with gr.Row():
59
+ reference_image = gr.Image(label="Reference Image")
60
+ motion_sequence = gr.Video(format="mp4", label="Motion Sequence",max_length=5)
61
+
62
+ with gr.Column():
63
+ random_seed = gr.Textbox(label="Random seed", value=1, info="default: -1")
64
+ sampling_steps = gr.Textbox(label="Sampling steps", value=25, info="default: 25")
65
+ guidance_scale = gr.Textbox(label="Guidance scale", value=7.5, info="default: 7.5")
66
+ submit = gr.Button("Animate")
67
+
68
+ def read_video(video):
69
+ reader = imageio.get_reader(video)
70
+ fps = reader.get_meta_data()['fps']
71
+ return video
72
+
73
+ def read_image(image, size=512):
74
+ return np.array(Image.fromarray(image).resize((size, size)))
75
+
76
+ # when user uploads a new video
77
+ motion_sequence.upload(
78
+ read_video,
79
+ motion_sequence,
80
+ motion_sequence,
81
+ queue=False
82
+ )
83
+ # when `first_frame` is updated
84
+ reference_image.upload(
85
+ read_image,
86
+ reference_image,
87
+ reference_image,
88
+ queue=False
89
+ )
90
+ # when the `submit` button is clicked
91
+ submit.click(
92
+ animate,
93
+ [reference_image, motion_sequence, random_seed, sampling_steps, guidance_scale],
94
+ animation
95
+ )
96
+
97
+ # Examples
98
+ gr.Markdown("## Examples")
99
+ gr.Examples(
100
+ fn=animate,
101
+ examples=[
102
+ ["inputs/applications/source_image/monalisa.png", "inputs/applications/driving/densepose/running.mp4"],
103
+ ["inputs/applications/source_image/demo4.png", "inputs/applications/driving/densepose/demo4.mp4"],
104
+ ["inputs/applications/source_image/dalle2.jpeg", "inputs/applications/driving/densepose/running2.mp4"],
105
+ ["inputs/applications/source_image/dalle8.jpeg", "inputs/applications/driving/densepose/dancing2.mp4"],
106
+ ["inputs/applications/source_image/multi1_source.png", "inputs/applications/driving/densepose/multi_dancing.mp4"],
107
+ ],
108
+ inputs=[reference_image, motion_sequence],
109
+ outputs=animation,
110
+ cache_examples=true_for_shared_ui
111
+ )
112
+
113
+ # demo.queue(max_size=15, api_open=False)
114
+ demo.launch(share=True, show_api=False)
demo/animate.py CHANGED
@@ -64,7 +64,7 @@ class MagicAnimate():
64
  vae = AutoencoderKL.from_pretrained(config.pretrained_model_path, subfolder="vae")
65
 
66
  ### Load controlnet
67
- controlnet = ControlNetModel.from_pretrained(config.pretrained_controlnet_path)
68
 
69
  vae.to(torch.float16)
70
  unet.to(torch.float16)
 
64
  vae = AutoencoderKL.from_pretrained(config.pretrained_model_path, subfolder="vae")
65
 
66
  ### Load controlnet
67
+ controlnet = ControlNetModel.from_pretrained(config.pretrained_controlnet_path)
68
 
69
  vae.to(torch.float16)
70
  unet.to(torch.float16)
requirements.txt CHANGED
@@ -1,127 +1,124 @@
1
- absl-py==1.4.0
2
- accelerate==0.22.0
3
- aiofiles==23.2.1
4
- aiohttp==3.8.5
5
- aiosignal==1.3.1
6
- altair==5.0.1
7
- annotated-types==0.5.0
8
- antlr4-python3-runtime==4.9.3
9
- anyio==3.7.1
10
- async-timeout==4.0.3
11
- attrs==23.1.0
12
- cachetools==5.3.1
13
- certifi==2023.7.22
14
- charset-normalizer==3.2.0
15
- click==8.1.7
16
- cmake==3.27.2
17
- contourpy==1.1.0
18
- cycler==0.11.0
19
- datasets==2.14.4
20
- dill==0.3.7
21
- einops==0.6.1
22
- exceptiongroup==1.1.3
23
- fastapi==0.103.0
24
- ffmpy==0.3.1
25
- filelock==3.12.2
26
- fonttools==4.42.1
27
- frozenlist==1.4.0
28
- fsspec==2023.6.0
29
- google-auth==2.22.0
30
- google-auth-oauthlib==1.0.0
31
- gradio==3.41.2
32
- gradio-client==0.5.0
33
- grpcio==1.57.0
34
- h11==0.14.0
35
- httpcore==0.17.3
36
- httpx==0.24.1
37
- huggingface-hub==0.16.4
38
- idna==3.4
39
- importlib-metadata==6.8.0
40
- importlib-resources==6.0.1
41
- jinja2==3.1.2
42
- joblib==1.3.2
43
- jsonschema==4.19.0
44
- jsonschema-specifications==2023.7.1
45
- kiwisolver==1.4.5
46
- lightning-utilities==0.9.0
47
- lit==16.0.6
48
- markdown==3.4.4
49
- markupsafe==2.1.3
50
- matplotlib==3.7.2
51
- mpmath==1.3.0
52
- multidict==6.0.4
53
- multiprocess==0.70.15
54
- networkx==3.1
55
- numpy==1.24.4
56
- nvidia-cublas-cu11==11.10.3.66
57
- nvidia-cuda-cupti-cu11==11.7.101
58
- nvidia-cuda-nvrtc-cu11==11.7.99
59
- nvidia-cuda-runtime-cu11==11.7.99
60
- nvidia-cudnn-cu11==8.5.0.96
61
- nvidia-cufft-cu11==10.9.0.58
62
- nvidia-curand-cu11==10.2.10.91
63
- nvidia-cusolver-cu11==11.4.0.1
64
- nvidia-cusparse-cu11==11.7.4.91
65
- nvidia-nccl-cu11==2.14.3
66
- nvidia-nvtx-cu11==11.7.91
67
- oauthlib==3.2.2
68
- omegaconf==2.3.0
69
- opencv-python==4.8.0.76
70
- orjson==3.9.5
71
- pandas==2.0.3
72
- pillow==9.5.0
73
- pkgutil-resolve-name==1.3.10
74
- protobuf==4.24.2
75
- psutil==5.9.5
76
- pyarrow==13.0.0
77
- pyasn1==0.5.0
78
- pyasn1-modules==0.3.0
79
- pydantic==2.3.0
80
- pydantic-core==2.6.3
81
- pydub==0.25.1
82
- pyparsing==3.0.9
83
- python-multipart==0.0.6
84
- pytorch-lightning==2.0.7
85
- pytz==2023.3
86
- pyyaml==6.0.1
87
- referencing==0.30.2
88
- regex==2023.8.8
89
- requests==2.31.0
90
- requests-oauthlib==1.3.1
91
- rpds-py==0.9.2
92
- rsa==4.9
93
- safetensors==0.3.3
94
- semantic-version==2.10.0
95
- sniffio==1.3.0
96
- starlette==0.27.0
97
- sympy==1.12
98
- tensorboard==2.14.0
99
- tensorboard-data-server==0.7.1
100
- tokenizers==0.13.3
101
- toolz==0.12.0
102
- torchmetrics==1.1.0
103
- tqdm==4.66.1
104
- transformers==4.32.0
105
- triton==2.0.0
106
- tzdata==2023.3
107
- urllib3==1.26.16
108
- uvicorn==0.23.2
109
- websockets==11.0.3
110
- werkzeug==2.3.7
111
- xxhash==3.3.0
112
- yarl==1.9.2
113
- zipp==3.16.2
114
- decord
115
- imageio==2.9.0
116
- imageio-ffmpeg==0.4.3
117
- timm
118
- scipy
119
- scikit-image
120
- av
121
- imgaug
122
- lpips
123
- ffmpeg-python
124
- torch==2.0.1
125
- torchvision==0.15.2
126
- xformers==0.0.22
127
- diffusers==0.21.4
 
1
+ absl-py==1.4.0
2
+ accelerate==0.22.0
3
+ aiofiles==23.2.1
4
+ aiohttp==3.8.5
5
+ aiosignal==1.3.1
6
+ altair==5.0.1
7
+ annotated-types==0.5.0
8
+ antlr4-python3-runtime==4.9.3
9
+ anyio==3.7.1
10
+ async-timeout==4.0.3
11
+ attrs==23.1.0
12
+ cachetools==5.3.1
13
+ certifi==2023.7.22
14
+ charset-normalizer==3.2.0
15
+ click==8.1.7
16
+ cmake==3.27.2
17
+ contourpy==1.1.0
18
+ cycler==0.11.0
19
+ datasets==2.14.4
20
+ dill==0.3.7
21
+ einops==0.6.1
22
+ exceptiongroup==1.1.3
23
+ fastapi==0.103.0
24
+ ffmpy==0.3.1
25
+ filelock==3.12.2
26
+ fonttools==4.42.1
27
+ frozenlist==1.4.0
28
+ fsspec==2023.6.0
29
+ google-auth==2.22.0
30
+ google-auth-oauthlib==1.0.0
31
+ grpcio==1.57.0
32
+ h11==0.14.0
33
+ httpcore
34
+ httpx
35
+ huggingface-hub
36
+ idna==3.4
37
+ importlib-metadata==6.8.0
38
+ importlib-resources==6.0.1
39
+ jinja2==3.1.2
40
+ joblib==1.3.2
41
+ jsonschema==4.19.0
42
+ jsonschema-specifications==2023.7.1
43
+ kiwisolver==1.4.5
44
+ lightning-utilities==0.9.0
45
+ lit==16.0.6
46
+ markdown==3.4.4
47
+ markupsafe==2.1.3
48
+ matplotlib==3.7.2
49
+ mpmath==1.3.0
50
+ multidict==6.0.4
51
+ multiprocess==0.70.15
52
+ networkx==3.1
53
+ numpy==1.24.4
54
+ nvidia-cublas-cu11==11.10.3.66
55
+ nvidia-cuda-cupti-cu11==11.7.101
56
+ nvidia-cuda-nvrtc-cu11==11.7.99
57
+ nvidia-cuda-runtime-cu11==11.7.99
58
+ nvidia-cudnn-cu11==8.5.0.96
59
+ nvidia-cufft-cu11==10.9.0.58
60
+ nvidia-curand-cu11==10.2.10.91
61
+ nvidia-cusolver-cu11==11.4.0.1
62
+ nvidia-cusparse-cu11==11.7.4.91
63
+ nvidia-nccl-cu11==2.14.3
64
+ nvidia-nvtx-cu11==11.7.91
65
+ oauthlib==3.2.2
66
+ omegaconf==2.3.0
67
+ opencv-python==4.8.0.76
68
+ orjson==3.9.5
69
+ pandas==2.0.3
70
+ pillow==9.5.0
71
+ pkgutil-resolve-name==1.3.10
72
+ protobuf==4.24.2
73
+ psutil==5.9.5
74
+ pyarrow==13.0.0
75
+ pyasn1==0.5.0
76
+ pyasn1-modules==0.3.0
77
+ pydantic==2.3.0
78
+ pydantic-core==2.6.3
79
+ pydub==0.25.1
80
+ pyparsing==3.0.9
81
+ python-multipart==0.0.6
82
+ pytorch-lightning==2.0.7
83
+ pytz==2023.3
84
+ pyyaml==6.0.1
85
+ referencing==0.30.2
86
+ regex
87
+ requests
88
+ requests-oauthlib
89
+ rpds-py==0.9.2
90
+ rsa==4.9
91
+ safetensors==0.3.3
92
+ semantic-version==2.10.0
93
+ sniffio==1.3.0
94
+ starlette==0.27.0
95
+ sympy==1.12
96
+ tensorboard==2.14.0
97
+ tensorboard-data-server==0.7.1
98
+ tokenizers==0.13.3
99
+ toolz==0.12.0
100
+ torchmetrics==1.1.0
101
+ tqdm
102
+ transformers==4.32.0
103
+ triton==2.0.0
104
+ tzdata==2023.3
105
+ urllib3==1.26.16
106
+ uvicorn==0.23.2
107
+ websockets==11.0.3
108
+ werkzeug==2.3.7
109
+ xxhash==3.3.0
110
+ yarl==1.9.2
111
+ zipp==3.16.2
112
+ decord
113
+ imageio==2.9.0
114
+ imageio-ffmpeg==0.4.3
115
+ timm
116
+ scipy
117
+ scikit-image
118
+ av
119
+ imgaug
120
+ lpips
121
+ ffmpeg-python
122
+ torch==2.0.1
123
+ torchvision==0.15.2
124
+ diffusers==0.21.4