Commit c2a3eed by cocktailpeanut • 1 parent: b9778c9

Commit message: update

Files changed:
- app.py (+11, -5)
- requirements.txt (+5, -5)
app.py CHANGED
@@ -36,6 +36,12 @@ css = """
     height: 2.5em;
 }
 """
+if torch.backends.mps.is_available():
+    device = "mps"
+elif torch.cuda.is_available():
+    device = "cuda"
+else:
+    device = "cpu"
 
 
 class AnimateController:
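This hunk replaces the app's hard-coded CUDA placement with a runtime device probe: prefer Apple's Metal backend (mps), then CUDA, then fall back to CPU. A minimal standalone sketch of the same pattern (the `nn.Linear` model is a stand-in for illustration, not code from this app):

```python
import torch

# Probe backends in order of preference.
if torch.backends.mps.is_available():      # Apple Silicon GPU
    device = "mps"
elif torch.cuda.is_available():            # NVIDIA GPU
    device = "cuda"
else:
    device = "cpu"                         # universal fallback

# Any module or tensor can then be placed with one .to(device) call.
model = torch.nn.Linear(4, 4).to(device)
x = torch.randn(1, 4, device=device)
print(model(x).device)  # mps, cuda, or cpu depending on the host
```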
@@ -94,11 +100,11 @@ class AnimateController:
         self.tokenizer = CLIPTokenizer.from_pretrained(
             stable_diffusion_dropdown, subfolder="tokenizer")
         self.text_encoder = CLIPTextModel.from_pretrained(
-            stable_diffusion_dropdown, subfolder="text_encoder").cuda()
+            stable_diffusion_dropdown, subfolder="text_encoder").to(device)
         self.vae = AutoencoderKL.from_pretrained(
-            stable_diffusion_dropdown, subfolder="vae").cuda()
+            stable_diffusion_dropdown, subfolder="vae").to(device)
         self.unet = UNet3DConditionModel.from_pretrained_2d(
-            stable_diffusion_dropdown, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda()
+            stable_diffusion_dropdown, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).to(device)
         return gr.Dropdown.update()
 
     def update_motion_module(self, motion_module_dropdown):
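Each submodel now lands on the probed device via `.to(device)` instead of the CUDA-only `.cuda()`. A hedged sketch of the same loading pattern in isolation (the checkpoint id is an assumption for illustration; the app uses whatever `stable_diffusion_dropdown` holds):

```python
import torch
from transformers import CLIPTextModel, CLIPTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
model_id = "runwayml/stable-diffusion-v1-5"  # hypothetical checkpoint id

tokenizer = CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer")
# .to(device) works on any nn.Module, so one call covers CPU, CUDA, and MPS.
text_encoder = CLIPTextModel.from_pretrained(
    model_id, subfolder="text_encoder").to(device)

tokens = tokenizer("a photo of a cat", return_tensors="pt").to(device)
with torch.no_grad():
    embeddings = text_encoder(**tokens).last_hidden_state
print(embeddings.shape)  # (1, sequence_length, hidden_size)
```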
@@ -181,7 +187,7 @@ class AnimateController:
             vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet,
             scheduler=scheduler_dict[sampler_dropdown](
                 **OmegaConf.to_container(self.inference_config.noise_scheduler_kwargs))
-        ).to("cuda")
+        ).to(device)
 
         if self.lora_model_state_dict != {}:
             pipeline = convert_lora(
@@ -190,7 +196,7 @@ class AnimateController:
             pipeline.unet = convert_lcm_lora(copy.deepcopy(
                 self.unet), self.lcm_lora_path, spatial_lora_slider)
 
-        pipeline.to("cuda")
+        pipeline.to(device)
 
         if seed_textbox != -1 and seed_textbox != "":
             torch.manual_seed(int(seed_textbox))
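The assembled pipeline gets the same treatment: `.to(device)` in place of hard-coded CUDA, with `torch.manual_seed` keeping generation reproducible when a seed is supplied. A rough sketch of the overall flow, assuming a stock diffusers pipeline rather than the app's own pipeline built from the parts loaded above:

```python
import torch
from diffusers import StableDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Stand-in pipeline; the checkpoint id is an assumption for illustration.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5").to(device)

seed = 42  # the app reads this from seed_textbox; -1 or "" means unseeded
if seed != -1:
    torch.manual_seed(seed)

image = pipe("a photo of a cat", num_inference_steps=20).images[0]
```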
requirements.txt CHANGED
@@ -1,9 +1,9 @@
-torch==1.13.1
-torchvision==0.14.1
-torchaudio==0.13.1
+#torch==1.13.1
+#torchvision==0.14.1
+#torchaudio==0.13.1
 diffusers==0.11.1
 transformers==4.25.1
-xformers==0.0.16
+#xformers==0.0.16
 imageio==2.27.0
 gradio==3.48.0
 gdown
@@ -12,4 +12,4 @@ omegaconf
 safetensors
 imageio[ffmpeg]
 imageio[pyav]
-accelerate
+accelerate
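Commenting out the torch, torchvision, torchaudio, and xformers pins stops pip from forcing the CUDA-oriented 1.13.1 wheels, so each platform (including Apple Silicon, where MPS needs a newer PyTorch) can install its own build out-of-band. A hedged sketch of a startup guard the app could use under that assumption (this check is illustrative, not part of the commit):

```python
import sys

# Fail fast with a helpful message if torch was not installed separately.
try:
    import torch
except ImportError:
    sys.exit("PyTorch is no longer pinned in requirements.txt; install a "
             "build matching your platform from https://pytorch.org first.")

print("torch", torch.__version__,
      "| CUDA:", torch.cuda.is_available(),
      "| MPS:", torch.backends.mps.is_available())
```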