radames committed on
Commit c01188e
Parent: 43462a5

sdxl loras

frontend/src/lib/components/Selectlist.svelte CHANGED
@@ -9,7 +9,7 @@
  </script>

  <div class="grid max-w-md grid-cols-4 items-center justify-items-start gap-3">
-   <label for="model-list" class="font-medium">{params?.title} </label>
+   <label for="model-list" class="text-sm font-medium">{params?.title} </label>
    {#if params?.values}
      <select
        bind:value
frontend/src/lib/components/TextArea.svelte CHANGED
@@ -8,11 +8,16 @@
  });
  </script>

- <div class="text-normal flex items-center rounded-md border border-gray-700 px-1 py-1">
-   <textarea
-     class="mx-1 w-full px-3 py-2 font-light outline-none dark:text-black"
-     title={params?.title}
-     placeholder="Add your prompt here..."
-     bind:value
-   ></textarea>
+ <div class="px-1 py-1">
+   <label class="text-sm font-medium" for={params?.title}>
+     {params?.title}
+   </label>
+   <div class="text-normal flex items-center rounded-md border border-gray-700">
+     <textarea
+       class="mx-1 w-full px-3 py-2 font-light outline-none dark:text-black"
+       title={params?.title}
+       placeholder="Add your prompt here..."
+       bind:value
+     ></textarea>
+   </div>
  </div>
frontend/src/routes/+page.svelte CHANGED
@@ -16,7 +16,7 @@
  let pipelineInfo: PipelineInfo;
  let isImageMode: boolean = false;
  let maxQueueSize: number = 0;
-
+ let currentQueueSize: number = 0;
  onMount(() => {
    getSettings();
  });
@@ -28,6 +28,16 @@
    isImageMode = pipelineInfo.input_mode.default === PipelineMode.IMAGE;
    maxQueueSize = settings.max_queue_size;
    pipelineParams = pipelineParams.filter((e) => e?.disabled !== true);
+   if (maxQueueSize > 0) {
+     getQueueSize();
+     setInterval(() => {
+       getQueueSize();
+     }, 2000);
+   }
+ }
+ async function getQueueSize() {
+   const data = await fetch(`${PUBLIC_BASE_URL}/queue_size`).then((r) => r.json());
+   currentQueueSize = data.queue_size;
  }

  function getSreamdata() {
@@ -59,14 +69,14 @@
  }
  </script>

- <div class="fixed right-2 top-2 max-w-xs rounded-lg p-4 text-center text-sm font-bold" id="error" />
+ <div class="fixed right-2 top-2 max-w-xs rounded-lg p-4 text-sm font-bold" id="error" />
  <main class="container mx-auto flex max-w-4xl flex-col gap-3 px-4 py-4">
-   <article class="flex- mx-auto max-w-xl text-center">
+   <article class="text-center">
      <h1 class="text-3xl font-bold">Real-Time Latent Consistency Model</h1>
      {#if pipelineInfo?.title?.default}
        <h3 class="text-xl font-bold">{pipelineInfo?.title?.default}</h3>
      {/if}
-     <p class="py-2 text-sm">
+     <p class="text-sm">
        This demo showcases
        <a
          href="https://huggingface.co/blog/lcm_lora"
@@ -80,10 +90,17 @@
          class="text-blue-500 underline hover:no-underline">Diffusers</a
        > with a MJPEG stream server.
      </p>
+     <p class="text-sm text-gray-500">
+       Change the prompt to generate different images, accepts <a
+         href="https://github.com/damian0815/compel/blob/main/doc/syntax.md"
+         target="_blank"
+         class="text-blue-500 underline hover:no-underline">Compel</a
+       > syntax.
+     </p>
      {#if maxQueueSize > 0}
        <p class="text-sm">
-         There are <span id="queue_size" class="font-bold">0</span> user(s) sharing the same GPU,
-         affecting real-time performance. Maximum queue size is {maxQueueSize}.
+         There are <span id="queue_size" class="font-bold">{currentQueueSize}</span>
+         user(s) sharing the same GPU, affecting real-time performance. Maximum queue size is {maxQueueSize}.
          <a
            href="https://huggingface.co/spaces/radames/Real-Time-Latent-Consistency-Model?duplicate=true"
            target="_blank"
@@ -93,16 +110,6 @@
      {/if}
    </article>
    {#if pipelineParams}
-     <header>
-       <h2 class="font-medium">Prompt</h2>
-       <p class="text-sm text-gray-500">
-         Change the prompt to generate different images, accepts <a
-           href="https://github.com/damian0815/compel/blob/main/doc/syntax.md"
-           target="_blank"
-           class="text-blue-500 underline hover:no-underline">Compel</a
-         > syntax.
-       </p>
-     </header>
      <PipelineOptions {pipelineParams}></PipelineOptions>
      <div class="flex gap-3">
        <Button on:click={toggleLcmLive} {disabled}>
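The polling loop added above fetches `${PUBLIC_BASE_URL}/queue_size` every 2 seconds and reads `data.queue_size` from the response. A minimal sketch of the endpoint shape this implies, assuming the backend is a FastAPI app; the `user_queue_map` dict here is hypothetical, standing in for however the server actually tracks connected users:

```python
# Sketch of the /queue_size endpoint the frontend polls every 2 s.
# Assumes FastAPI; `user_queue_map` is a hypothetical stand-in for the
# server's real per-user bookkeeping.
from fastapi import FastAPI
from fastapi.responses import JSONResponse

app = FastAPI()
user_queue_map: dict[str, object] = {}


@app.get("/queue_size")
async def get_queue_size() -> JSONResponse:
    # The frontend reads `data.queue_size` from this payload.
    return JSONResponse({"queue_size": len(user_queue_map)})
```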
pipelines/controlnetLoraSDXL.py ADDED
@@ -0,0 +1,261 @@
+ from diffusers import (
+     StableDiffusionXLControlNetImg2ImgPipeline,
+     ControlNetModel,
+     LCMScheduler,
+     AutoencoderKL,
+ )
+ from compel import Compel, ReturnedEmbeddingsType
+ import torch
+ from pipelines.utils.canny_gpu import SobelOperator
+
+ try:
+     import intel_extension_for_pytorch as ipex  # type: ignore
+ except ImportError:
+     pass
+
+ import psutil
+ from config import Args
+ from pydantic import BaseModel, Field
+ from PIL import Image
+
+ controlnet_model = "diffusers/controlnet-canny-sdxl-1.0"
+ model_id = "stabilityai/stable-diffusion-xl-base-1.0"
+ lcm_lora_id = "latent-consistency/lcm-lora-sdxl"
+
+ # base models with an activation token; the token is prepended to the prompt
+ base_models = {
+     "plasmo/woolitize": "woolitize",
+     "nitrosocke/Ghibli-Diffusion": "ghibli style",
+     "nitrosocke/mo-di-diffusion": "modern disney style",
+ }
+ # lcm_lora_id = "latent-consistency/lcm-lora-sdv1-5"
+
+
+ default_prompt = "Portrait of The Terminator, glare pose, detailed, intricate, full of colour, cinematic lighting, trending on artstation, 8k, hyperrealistic, focused, extreme details, unreal engine 5 cinematic, masterpiece"
+ default_negative_prompt = "blurry, low quality, render, 3D, oversaturated"
+
+
+ class Pipeline:
+     class Info(BaseModel):
+         name: str = "controlnet+loras+sdxl"
+         title: str = "SDXL + LCM + LoRA + ControlNet"
+         description: str = "Generates an image from a text prompt"
+         input_mode: str = "image"
+
+     class InputParams(BaseModel):
+         prompt: str = Field(
+             default_prompt,
+             title="Prompt",
+             field="textarea",
+             id="prompt",
+         )
+         model_id: str = Field(
+             "plasmo/woolitize",
+             title="Base Model",
+             values=list(base_models.keys()),
+             field="select",
+             id="model_id",
+         )
+         negative_prompt: str = Field(
+             default_negative_prompt,
+             title="Negative Prompt",
+             field="textarea",
+             id="negative_prompt",
+             hide=True,
+         )
+         seed: int = Field(
+             2159232, min=0, title="Seed", field="seed", hide=True, id="seed"
+         )
+         steps: int = Field(
+             4, min=2, max=15, title="Steps", field="range", hide=True, id="steps"
+         )
+         width: int = Field(
+             512, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
+         )
+         height: int = Field(
+             512, min=2, max=15, title="Height", disabled=True, hide=True, id="height"
+         )
+         guidance_scale: float = Field(
+             1.0,
+             min=0,
+             max=20,
+             step=0.001,
+             title="Guidance Scale",
+             field="range",
+             hide=True,
+             id="guidance_scale",
+         )
+         strength: float = Field(
+             0.5,
+             min=0.25,
+             max=1.0,
+             step=0.001,
+             title="Strength",
+             field="range",
+             hide=True,
+             id="strength",
+         )
+         controlnet_scale: float = Field(
+             0.5,
+             min=0,
+             max=1.0,
+             step=0.001,
+             title="ControlNet Scale",
+             field="range",
+             hide=True,
+             id="controlnet_scale",
+         )
+         controlnet_start: float = Field(
+             0.0,
+             min=0,
+             max=1.0,
+             step=0.001,
+             title="ControlNet Start",
+             field="range",
+             hide=True,
+             id="controlnet_start",
+         )
+         controlnet_end: float = Field(
+             1.0,
+             min=0,
+             max=1.0,
+             step=0.001,
+             title="ControlNet End",
+             field="range",
+             hide=True,
+             id="controlnet_end",
+         )
+         canny_low_threshold: float = Field(
+             0.31,
+             min=0,
+             max=1.0,
+             step=0.001,
+             title="Canny Low Threshold",
+             field="range",
+             hide=True,
+             id="canny_low_threshold",
+         )
+         canny_high_threshold: float = Field(
+             0.125,
+             min=0,
+             max=1.0,
+             step=0.001,
+             title="Canny High Threshold",
+             field="range",
+             hide=True,
+             id="canny_high_threshold",
+         )
+         debug_canny: bool = Field(
+             False,
+             title="Debug Canny",
+             field="checkbox",
+             hide=True,
+             id="debug_canny",
+         )
+
+     def __init__(self, args: Args, device: torch.device, torch_dtype: torch.dtype):
+         controlnet_canny = ControlNetModel.from_pretrained(
+             controlnet_model, torch_dtype=torch_dtype
+         ).to(device)
+         vae = AutoencoderKL.from_pretrained(
+             "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch_dtype
+         )
+         if args.safety_checker:
+             self.pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
+                 model_id,
+                 controlnet=controlnet_canny,
+                 vae=vae,
+             )
+         else:
+             self.pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
+                 model_id,
+                 safety_checker=None,
+                 controlnet=controlnet_canny,
+                 vae=vae,
+             )
+         self.canny_torch = SobelOperator(device=device)
+         # Load LCM LoRA
+         self.pipe.load_lora_weights(lcm_lora_id, adapter_name="lcm")
+         self.pipe.load_lora_weights(
+             "CiroN2022/toy-face",
+             weight_name="toy_face_sdxl.safetensors",
+             adapter_name="toy",
+         )
+         self.pipe.set_adapters(["lcm", "toy"], adapter_weights=[1.0, 0.8])
+
+         self.pipe.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config)
+         self.pipe.set_progress_bar_config(disable=True)
+         self.pipe.to(device=device, dtype=torch_dtype)
+
+         if psutil.virtual_memory().total < 64 * 1024**3:
+             self.pipe.enable_attention_slicing()
+
+         self.pipe.compel_proc = Compel(
+             tokenizer=[self.pipe.tokenizer, self.pipe.tokenizer_2],
+             text_encoder=[self.pipe.text_encoder, self.pipe.text_encoder_2],
+             returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
+             requires_pooled=[False, True],
+         )
+
+         if args.torch_compile:
+             self.pipe.unet = torch.compile(
+                 self.pipe.unet, mode="reduce-overhead", fullgraph=True
+             )
+             self.pipe.vae = torch.compile(
+                 self.pipe.vae, mode="reduce-overhead", fullgraph=True
+             )
+             self.pipe(
+                 prompt="warmup",
+                 image=[Image.new("RGB", (768, 768))],
+                 control_image=[Image.new("RGB", (768, 768))],
+             )
+
+     def predict(self, params: "Pipeline.InputParams") -> Image.Image:
+         generator = torch.manual_seed(params.seed)
+         print(f"Using model: {params.model_id}")
+         # pipe = self.pipes[params.model_id]
+
+         # activation_token = base_models[params.model_id]
+         # prompt = f"{activation_token} {params.prompt}"
+         prompt_embeds, pooled_prompt_embeds = self.pipe.compel_proc(
+             [params.prompt, params.negative_prompt]
+         )
+         control_image = self.canny_torch(
+             params.image, params.canny_low_threshold, params.canny_high_threshold
+         )
+
+         results = self.pipe(
+             image=params.image,
+             control_image=control_image,
+             prompt_embeds=prompt_embeds[0:1],
+             pooled_prompt_embeds=pooled_prompt_embeds[0:1],
+             negative_prompt_embeds=prompt_embeds[1:2],
+             negative_pooled_prompt_embeds=pooled_prompt_embeds[1:2],
+             generator=generator,
+             strength=params.strength,
+             num_inference_steps=params.steps,
+             guidance_scale=params.guidance_scale,
+             width=params.width,
+             height=params.height,
+             output_type="pil",
+             controlnet_conditioning_scale=params.controlnet_scale,
+             control_guidance_start=params.controlnet_start,
+             control_guidance_end=params.controlnet_end,
+         )
+
+         nsfw_content_detected = (
+             results.nsfw_content_detected[0]
+             if "nsfw_content_detected" in results
+             else False
+         )
+         if nsfw_content_detected:
+             return None
+         result_image = results.images[0]
+         if params.debug_canny:
+             # paste the canny control image on top of the result image
+             w0, h0 = (200, 200)
+             control_image = control_image.resize((w0, h0))
+             w1, h1 = result_image.size
+             result_image.paste(control_image, (w1 - w0, h1 - h0))
+
+         return result_image
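Note how `predict()` encodes the prompt and negative prompt as a single Compel batch and recovers each side by slicing, avoiding a second encoder pass. A standalone sketch of that pattern, using the plain SDXL base pipeline rather than the ControlNet variant above, purely for illustration:

```python
# Sketch: encode positive and negative prompts in one Compel batch,
# then split with [0:1] / [1:2] so each slice keeps the batch dimension
# the pipeline expects. Assumes network access to download SDXL.
import torch
from diffusers import StableDiffusionXLPipeline
from compel import Compel, ReturnedEmbeddingsType

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
compel_proc = Compel(
    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True],
)
prompt_embeds, pooled_prompt_embeds = compel_proc(
    ["portrait of the terminator", "blurry, low quality"]
)
positive_embeds = prompt_embeds[0:1]  # row 0: the prompt
negative_embeds = prompt_embeds[1:2]  # row 1: the negative prompt
```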
pipelines/{txt2imglora.py → txt2imgLoRA.py} RENAMED
File without changes