Upload lora-scripts/sd-scripts/sdxl_minimal_inference.py with huggingface_hub
lora-scripts/sd-scripts/sdxl_minimal_inference.py
ADDED
@@ -0,0 +1,345 @@
# Minimal code for performing inference locally. Uses the HuggingFace/Diffusers CLIP, scheduler and VAE

import argparse
import datetime
import math
import os
import random
from einops import repeat
import numpy as np

import torch
from library.device_utils import init_ipex, get_preferred_device

init_ipex()

from tqdm import tqdm
from transformers import CLIPTokenizer
from diffusers import EulerDiscreteScheduler
from PIL import Image

# import open_clip
from safetensors.torch import load_file

from library import model_util, sdxl_model_util
import networks.lora as lora
from library.utils import setup_logging

setup_logging()
import logging

logger = logging.getLogger(__name__)

# scheduler: these settings appear to be the same as for SD1/2
SCHEDULER_LINEAR_START = 0.00085
SCHEDULER_LINEAR_END = 0.0120
SCHEDULER_TIMESTEPS = 1000
SCHEDULER_SCHEDULE = "scaled_linear"

# Time Embedding is copied from Diffusers


def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
    """
    Create sinusoidal timestep embeddings.
    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    if not repeat_only:
        half = dim // 2
        freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(
            device=timesteps.device
        )
        args = timesteps[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    else:
        embedding = repeat(timesteps, "b -> b d", d=dim)
    return embedding


def get_timestep_embedding(x, outdim):
    assert len(x.shape) == 2
    b, dims = x.shape[0], x.shape[1]
    # x = rearrange(x, "b d -> (b d)")
    x = torch.flatten(x)
    emb = timestep_embedding(x, outdim)
    # emb = rearrange(emb, "(b d) d2 -> b (d d2)", b=b, d=dims, d2=outdim)
    emb = torch.reshape(emb, (b, dims * outdim))
    return emb

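# Note: get_timestep_embedding(x, 256) on a (1, 2) input returns a (1, 512) embedding, so the three
# size/crop/target pairs built in generate_image below give 3 * 512 = 1536 dims; together with the
# 1280-dim pooled output of Text Encoder 2 this should match the SDXL UNet's additional (adm) input width.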
if __name__ == "__main__":
    # change here to change image generation conditions

    # Values to pass to the additional vector embedding of SDXL
    target_height = 1024
    target_width = 1024
    original_height = target_height
    original_width = target_width
    crop_top = 0
    crop_left = 0

    steps = 50
    guidance_scale = 7
    seed = None  # 1

    DEVICE = get_preferred_device()
    DTYPE = torch.float16  # bfloat16 may work

    parser = argparse.ArgumentParser()
    parser.add_argument("--ckpt_path", type=str, required=True)
    parser.add_argument("--prompt", type=str, default="A photo of a cat")
    parser.add_argument("--prompt2", type=str, default=None)
    parser.add_argument("--negative_prompt", type=str, default="")
    parser.add_argument("--output_dir", type=str, default=".")
    parser.add_argument(
        "--lora_weights",
        type=str,
        nargs="*",
        default=[],
        help="LoRA weights, only supports networks.lora, each argument is a `path;multiplier` (semi-colon separated)",
    )
    parser.add_argument("--interactive", action="store_true")
    args = parser.parse_args()

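    # Example invocation (paths and file names are placeholders):
    #   python sdxl_minimal_inference.py --ckpt_path sd_xl_base_1.0.safetensors --prompt "a photo of a cat"
    # Add --lora_weights "some_lora.safetensors;0.8" to merge a LoRA, or --interactive for a prompt loop.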
    if args.prompt2 is None:
        args.prompt2 = args.prompt

    # HuggingFace model ids
    text_encoder_1_name = "openai/clip-vit-large-patch14"
    text_encoder_2_name = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"

    # Load the checkpoint. For model conversion, see that function

    # If main RAM is limited, it may be better to load directly onto the GPU
    text_model1, text_model2, vae, unet, _, _ = sdxl_model_util.load_models_from_sdxl_checkpoint(
        sdxl_model_util.MODEL_VERSION_SDXL_BASE_V1_0, args.ckpt_path, "cpu"
    )

    # Even in SDXL itself, Text Encoder 1 uses HuggingFace's model

    # In SDXL itself, Text Encoder 2 uses open_clip
    # That could be used here, but to match the Diffusers version of SD2, HuggingFace's model is used
    # The weight conversion code is almost the same as for SD2

    # The structure of the VAE is the same as SD1/2, but the weights seem to be different. Above all, the mysterious scale value is different.
    # NaN seems more likely to occur in fp16

+
unet.to(DEVICE, dtype=DTYPE)
|
146 |
+
unet.eval()
|
147 |
+
|
148 |
+
vae_dtype = DTYPE
|
149 |
+
if DTYPE == torch.float16:
|
150 |
+
logger.info("use float32 for vae")
|
151 |
+
vae_dtype = torch.float32
|
152 |
+
vae.to(DEVICE, dtype=vae_dtype)
|
153 |
+
vae.eval()
|
154 |
+
|
155 |
+
text_model1.to(DEVICE, dtype=DTYPE)
|
156 |
+
text_model1.eval()
|
157 |
+
text_model2.to(DEVICE, dtype=DTYPE)
|
158 |
+
text_model2.eval()
|
159 |
+
|
160 |
+
unet.set_use_memory_efficient_attention(True, False)
|
161 |
+
if torch.__version__ >= "2.0.0": # PyTorch 2.0.0 以上対応のxformersなら以下が使える
|
162 |
+
vae.set_use_memory_efficient_attention_xformers(True)
|
163 |
+
|
164 |
+
# Tokenizers
|
165 |
+
tokenizer1 = CLIPTokenizer.from_pretrained(text_encoder_1_name)
|
166 |
+
# tokenizer2 = lambda x: open_clip.tokenize(x, context_length=77)
|
167 |
+
tokenizer2 = CLIPTokenizer.from_pretrained(text_encoder_2_name)
|
168 |
+
|
169 |
+
# LoRA
|
170 |
+
for weights_file in args.lora_weights:
|
171 |
+
if ";" in weights_file:
|
172 |
+
weights_file, multiplier = weights_file.split(";")
|
173 |
+
multiplier = float(multiplier)
|
174 |
+
else:
|
175 |
+
multiplier = 1.0
|
176 |
+
|
177 |
+
lora_model, weights_sd = lora.create_network_from_weights(
|
178 |
+
multiplier, weights_file, vae, [text_model1, text_model2], unet, None, True
|
179 |
+
)
|
180 |
+
lora_model.merge_to([text_model1, text_model2], unet, weights_sd, DTYPE, DEVICE)
|
181 |
+
|
182 |
+
# scheduler
|
183 |
+
scheduler = EulerDiscreteScheduler(
|
184 |
+
num_train_timesteps=SCHEDULER_TIMESTEPS,
|
185 |
+
beta_start=SCHEDULER_LINEAR_START,
|
186 |
+
beta_end=SCHEDULER_LINEAR_END,
|
187 |
+
beta_schedule=SCHEDLER_SCHEDULE,
|
188 |
+
)
|
189 |
+
|
190 |
+
def generate_image(prompt, prompt2, negative_prompt, seed=None):
|
191 |
+
# 将来的にサイズ情報も変えられるようにする / Make it possible to change the size information in the future
|
192 |
+
# prepare embedding
|
193 |
+
with torch.no_grad():
|
194 |
+
# vector
|
195 |
+
emb1 = get_timestep_embedding(torch.FloatTensor([original_height, original_width]).unsqueeze(0), 256)
|
196 |
+
emb2 = get_timestep_embedding(torch.FloatTensor([crop_top, crop_left]).unsqueeze(0), 256)
|
197 |
+
emb3 = get_timestep_embedding(torch.FloatTensor([target_height, target_width]).unsqueeze(0), 256)
|
198 |
+
# logger.info("emb1", emb1.shape)
|
199 |
+
c_vector = torch.cat([emb1, emb2, emb3], dim=1).to(DEVICE, dtype=DTYPE)
|
200 |
+
uc_vector = c_vector.clone().to(
|
201 |
+
DEVICE, dtype=DTYPE
|
202 |
+
) # ちょっとここ正しいかどうかわからない I'm not sure if this is right
|
203 |
+
|
204 |
+
# crossattn
|
205 |
+
|
206 |
+
# Text Encoderを二つ呼ぶ関数 Function to call two Text Encoders
|
207 |
+
def call_text_encoder(text, text2):
|
208 |
+
# text encoder 1
|
209 |
+
batch_encoding = tokenizer1(
|
210 |
+
text,
|
211 |
+
truncation=True,
|
212 |
+
return_length=True,
|
213 |
+
return_overflowing_tokens=False,
|
214 |
+
padding="max_length",
|
215 |
+
return_tensors="pt",
|
216 |
+
)
|
217 |
+
tokens = batch_encoding["input_ids"].to(DEVICE)
|
218 |
+
|
219 |
+
with torch.no_grad():
|
220 |
+
enc_out = text_model1(tokens, output_hidden_states=True, return_dict=True)
|
221 |
+
text_embedding1 = enc_out["hidden_states"][11]
|
222 |
+
# text_embedding = pipe.text_encoder.text_model.final_layer_norm(text_embedding) # layer normは通さないらしい
|
223 |
+
|
224 |
+
# text encoder 2
|
225 |
+
# tokens = tokenizer2(text2).to(DEVICE)
|
226 |
+
tokens = tokenizer2(
|
227 |
+
text,
|
228 |
+
truncation=True,
|
229 |
+
return_length=True,
|
230 |
+
return_overflowing_tokens=False,
|
231 |
+
padding="max_length",
|
232 |
+
return_tensors="pt",
|
233 |
+
)
|
234 |
+
tokens = batch_encoding["input_ids"].to(DEVICE)
|
235 |
+
|
236 |
+
with torch.no_grad():
|
237 |
+
enc_out = text_model2(tokens, output_hidden_states=True, return_dict=True)
|
238 |
+
text_embedding2_penu = enc_out["hidden_states"][-2]
|
239 |
+
# logger.info("hidden_states2", text_embedding2_penu.shape)
|
240 |
+
text_embedding2_pool = enc_out["text_embeds"] # do not support Textual Inversion
|
241 |
+
|
242 |
+
# 連結して終了 concat and finish
|
243 |
+
text_embedding = torch.cat([text_embedding1, text_embedding2_penu], dim=2)
|
244 |
+
return text_embedding, text_embedding2_pool
|
245 |
+
|
246 |
+
# cond
|
247 |
+
c_ctx, c_ctx_pool = call_text_encoder(prompt, prompt2)
|
248 |
+
# logger.info(c_ctx.shape, c_ctx_p.shape, c_vector.shape)
|
249 |
+
c_vector = torch.cat([c_ctx_pool, c_vector], dim=1)
|
250 |
+
|
251 |
+
# uncond
|
252 |
+
uc_ctx, uc_ctx_pool = call_text_encoder(negative_prompt, negative_prompt)
|
253 |
+
uc_vector = torch.cat([uc_ctx_pool, uc_vector], dim=1)
|
254 |
+
|
255 |
+
text_embeddings = torch.cat([uc_ctx, c_ctx])
|
256 |
+
vector_embeddings = torch.cat([uc_vector, c_vector])
|
257 |
+
|
258 |
+
# メモリ使用量を減らすにはここでText Encoderを削除するかCPUへ移動する
|
259 |
+
|
260 |
+
if seed is not None:
|
261 |
+
random.seed(seed)
|
262 |
+
np.random.seed(seed)
|
263 |
+
torch.manual_seed(seed)
|
264 |
+
torch.cuda.manual_seed_all(seed)
|
265 |
+
|
266 |
+
# # random generator for initial noise
|
267 |
+
# generator = torch.Generator(device="cuda").manual_seed(seed)
|
268 |
+
generator = None
|
269 |
+
else:
|
270 |
+
generator = None
|
271 |
+
|
272 |
+
# get the initial random noise unless the user supplied it
|
273 |
+
# SDXLはCPUでlatentsを作成しているので一応合わせておく、Diffusersはtarget deviceでlatentsを作成している
|
274 |
+
# SDXL creates latents in CPU, Diffusers creates latents in target device
|
275 |
+
latents_shape = (1, 4, target_height // 8, target_width // 8)
|
276 |
+
latents = torch.randn(
|
277 |
+
latents_shape,
|
278 |
+
generator=generator,
|
279 |
+
device="cpu",
|
280 |
+
dtype=torch.float32,
|
281 |
+
).to(DEVICE, dtype=DTYPE)
|
282 |
+
|
283 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
284 |
+
latents = latents * scheduler.init_noise_sigma
|
285 |
+
|
286 |
+
# set timesteps
|
287 |
+
scheduler.set_timesteps(steps, DEVICE)
|
288 |
+
|
289 |
+
# このへんはDiffusersからのコピペ
|
290 |
+
# Copy from Diffusers
|
291 |
+
timesteps = scheduler.timesteps.to(DEVICE) # .to(DTYPE)
|
292 |
+
num_latent_input = 2
|
293 |
+
with torch.no_grad():
|
294 |
+
for i, t in enumerate(tqdm(timesteps)):
|
295 |
+
# expand the latents if we are doing classifier free guidance
|
296 |
+
latent_model_input = latents.repeat((num_latent_input, 1, 1, 1))
|
297 |
+
latent_model_input = scheduler.scale_model_input(latent_model_input, t)
|
298 |
+
|
299 |
+
noise_pred = unet(latent_model_input, t, text_embeddings, vector_embeddings)
|
300 |
+
|
301 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(num_latent_input) # uncond by negative prompt
|
302 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
303 |
+
|
304 |
+
# compute the previous noisy sample x_t -> x_t-1
|
305 |
+
# latents = scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
306 |
+
latents = scheduler.step(noise_pred, t, latents).prev_sample
|
307 |
+
|
308 |
+
# latents = 1 / 0.18215 * latents
|
309 |
+
latents = 1 / sdxl_model_util.VAE_SCALE_FACTOR * latents
|
310 |
+
latents = latents.to(vae_dtype)
|
311 |
+
image = vae.decode(latents).sample
|
312 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
313 |
+
|
314 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
|
315 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
316 |
+
|
317 |
+
# image = self.numpy_to_pil(image)
|
318 |
+
image = (image * 255).round().astype("uint8")
|
319 |
+
image = [Image.fromarray(im) for im in image]
|
320 |
+
|
321 |
+
# 保存して終了 save and finish
|
322 |
+
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
|
323 |
+
for i, img in enumerate(image):
|
324 |
+
img.save(os.path.join(args.output_dir, f"image_{timestamp}_{i:03d}.png"))
|
325 |
+
|
326 |
+
if not args.interactive:
|
327 |
+
generate_image(args.prompt, args.prompt2, args.negative_prompt, seed)
|
328 |
+
else:
|
329 |
+
# loop for interactive
|
330 |
+
while True:
|
331 |
+
prompt = input("prompt: ")
|
332 |
+
if prompt == "":
|
333 |
+
break
|
334 |
+
prompt2 = input("prompt2: ")
|
335 |
+
if prompt2 == "":
|
336 |
+
prompt2 = prompt
|
337 |
+
negative_prompt = input("negative prompt: ")
|
338 |
+
seed = input("seed: ")
|
339 |
+
if seed == "":
|
340 |
+
seed = None
|
341 |
+
else:
|
342 |
+
seed = int(seed)
|
343 |
+
generate_image(prompt, prompt2, negative_prompt, seed)
|
344 |
+
|
345 |
+
logger.info("Done!")
|