Ming Li committed
Commit 3da112e • 1 Parent(s): a0dd522
add Decorator for each condition
model.py CHANGED
@@ -44,6 +44,7 @@ class Model:
         self.pipe = self.load_pipe(base_model_id, task_name)
         self.preprocessor = Preprocessor()
 
+    @spaces.GPU()
     def load_pipe(self, base_model_id: str, task_name) -> DiffusionPipeline:
         if (
             base_model_id == self.base_model_id
@@ -58,8 +59,8 @@ class Model:
             base_model_id, safety_checker=None, controlnet=controlnet, torch_dtype=torch.float32
         )
         pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
-        if self.device.type == "cuda":
-            pipe.disable_xformers_memory_efficient_attention()
+        # if self.device.type == "cuda":
+        #     pipe.disable_xformers_memory_efficient_attention()
         pipe.to(self.device)
 
         torch.cuda.empty_cache()
@@ -80,6 +81,7 @@ class Model:
         self.pipe = self.load_pipe(self.base_model_id, self.task_name)
         return self.base_model_id
 
+    @spaces.GPU()
     def load_controlnet_weight(self, task_name: str) -> None:
        if task_name == self.task_name:
            return
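
For readers who haven't used it, `spaces.GPU` is the ZeroGPU decorator from Hugging Face's `spaces` package: on a ZeroGPU Space, any function that actually executes CUDA work has to be wrapped with it so a GPU is attached for the duration of the call, which is why both `load_pipe` and `load_controlnet_weight` now carry `@spaces.GPU()`. A minimal sketch of the pattern, with a placeholder model id and function name that are not taken from this repository:

import spaces
import torch
from diffusers import DiffusionPipeline

# The pipeline can be built at startup; GPU execution happens inside the decorated function.
pipe = DiffusionPipeline.from_pretrained(
    "org/placeholder-model",  # hypothetical model id, for illustration only
    torch_dtype=torch.float16,
)
pipe.to("cuda")

@spaces.GPU()  # requests a GPU for this call; a longer window can be requested with spaces.GPU(duration=120)
def generate(prompt: str):
    # Runs with a GPU attached, then releases it when the call returns.
    return pipe(prompt).images[0]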
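
The second hunk comments out the xformers toggle rather than deleting it. If xformers were to be kept as an optional backend instead, one hedged alternative (not what this commit does) is to guard the call, since `enable_xformers_memory_efficient_attention()` raises when the xformers package is missing; `enable_xformers_if_available` below is a hypothetical helper name:

def enable_xformers_if_available(pipe) -> None:
    # Try the xformers attention backend; keep the default attention implementation if it is unavailable.
    try:
        pipe.enable_xformers_memory_efficient_attention()
    except Exception:
        pass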