add storyDiffusion

- examples/lecun/yann-lecun2.jpeg +0 -0
- examples/story_examples.py +87 -0
- examples/taylor/1.jpeg +0 -0
- launch/story_generator.py +647 -0
- requirements.txt +3 -2
- storyDiffusion/fonts/Inkfree.ttf +0 -0
- storyDiffusion/utils/__init__.py +7 -0
- storyDiffusion/utils/gradio_utils.py +416 -0
- storyDiffusion/utils/model.py +113 -0
- storyDiffusion/utils/pipeline.py +577 -0
- storyDiffusion/utils/style_template.py +39 -0
- storyDiffusion/utils/utils.py +421 -0
examples/lecun/yann-lecun2.jpeg
ADDED
examples/story_examples.py
ADDED
@@ -0,0 +1,87 @@
import os


def get_image_path_list(folder_name):
    image_basename_list = os.listdir(folder_name)
    image_path_list = sorted([os.path.join(folder_name, basename) for basename in image_basename_list])
    return image_path_list


def array2string(arr):
    stringtmp = ""
    for i, part in enumerate(arr):
        if i != len(arr) - 1:
            stringtmp += part + "\n"
        else:
            stringtmp += part

    return stringtmp


def get_examples():
    return [
        [0, 0.5, 0.5, 2, "a man, wearing black suit",
         "bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs",
         array2string(["at home, read new paper #at home, The newspaper says there is a treasure house in the forest.",
                       "on the road, near the forest",
                       "[NC] The car on the road, near the forest #He drives to the forest in search of treasure.",
                       "[NC]A tiger appeared in the forest, at night ",
                       "very frightened, open mouth, in the forest, at night",
                       "running very fast, in the forest, at night",
                       "[NC] A house in the forest, at night #Suddenly, he discovers the treasure house!",
                       "in the house filled with treasure, laughing, at night #He is overjoyed inside the house."]),
         "Comic book", "Only Using Textual Description", get_image_path_list('./examples/taylor'), 768, 768],
        [0, 0.5, 0.5, 2, "a policeman img, wearing a white shirt",
         "bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs",
         array2string(["Directing traffic on the road. ",
                       "walking on the streets.",
                       "Chasing a man on the street.",
                       "At the police station."]),
         "Japanese Anime", "Using Ref Images", get_image_path_list('./examples/lecun'), 768, 768],
        [1, 0.5, 0.5, 3, "a woman img, wearing a white T-shirt, blue loose hair",
         "bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs",
         array2string(["wake up in the bed",
                       "have breakfast",
                       "is on the road, go to company",
                       "work in the company",
                       "Take a walk next to the company at noon",
                       "lying in bed at night"]),
         "Japanese Anime", "Using Ref Images", get_image_path_list('./examples/taylor'), 768, 768],
        [0, 0.5, 0.5, 3, "a man, wearing black jacket",
         "bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs",
         array2string(["wake up in the bed",
                       "have breakfast",
                       "is on the road, go to the company, close look",
                       "work in the company",
                       "laughing happily",
                       "lying in bed at night"]),
         "Japanese Anime", "Only Using Textual Description", get_image_path_list('./examples/taylor'), 768, 768],
        [0, 0.3, 0.5, 3, "a girl, wearing white shirt, black skirt, black tie, yellow hair",
         "bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs",
         array2string([
             "at home #at home, began to go to drawing",
             "sitting alone on a park bench.",
             "reading a book on a park bench.",
             "[NC]A squirrel approaches, peeking over the bench. ",
             "look around in the park. # She looks around and enjoys the beauty of nature.",
             "[NC]leaf falls from the tree, landing on the sketchbook.",
             "picks up the leaf, examining its details closely.",
             "[NC]The brown squirrel appear.",
             "is very happy # She is very happy to see the squirrel again",
             "[NC]The brown squirrel takes the cracker and scampers up a tree. # She gives the squirrel cracker"]),
         "Japanese Anime", "Only Using Textual Description", get_image_path_list('./examples/taylor'), 768, 768]
    ]
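
Note (not part of the commit): each row returned by get_examples() follows the order of the gr.Examples inputs list defined in launch/story_generator.py, i.e. seed, sa32, sa64, id_length, general_prompt, negative_prompt, prompt_array, style, model_type, reference images, height, width. A minimal sketch of unpacking one row, assuming it is run from the repository root; the variable names here are illustrative only:

    # Hypothetical helper, run from the repo root.
    from examples.story_examples import get_examples

    (seed, sa32, sa64, id_length, general_prompt, negative_prompt,
     prompt_array, style, model_type, ref_images, height, width) = get_examples()[0]

    # The prompt array is one newline-joined string; each line becomes one frame.
    print(style, model_type, len(prompt_array.splitlines()), "frames")
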
examples/taylor/1.jpeg
ADDED
launch/story_generator.py
ADDED
@@ -0,0 +1,647 @@
import gradio as gr
from examples.story_examples import get_examples
import spaces
import numpy as np
import torch
import random
import os
import torch.nn.functional as F
from diffusers import StableDiffusionXLPipeline, DDIMScheduler
import copy
from huggingface_hub import hf_hub_download
from diffusers.utils import load_image

from storyDiffusion.utils.gradio_utils import AttnProcessor2_0 as AttnProcessor, cal_attn_mask_xl
from storyDiffusion.utils import PhotoMakerStableDiffusionXLPipeline
from storyDiffusion.utils.utils import get_comic
from storyDiffusion.utils.style_template import styles


# Constants
image_encoder_path = "./data/models/ip_adapter/sdxl_models/image_encoder"
ip_ckpt = "./data/models/ip_adapter/sdxl_models/ip-adapter_sdxl_vit-h.bin"
os.environ["no_proxy"] = "localhost,127.0.0.1,::1"
STYLE_NAMES = list(styles.keys())
DEFAULT_STYLE_NAME = "Japanese Anime"
MAX_SEED = np.iinfo(np.int32).max

# Global variables
global models_dict, use_va, photomaker_path, pipe2, pipe4, attn_count, total_count, id_length, total_length, cur_step, cur_model_type, write, sa32, sa64, height, width, attn_procs, unet, num_steps

models_dict = {
    "RealVision": "SG161222/RealVisXL_V4.0",
    "Unstable": "stablediffusionapi/sdxl-unstable-diffusers-y"
}
use_va = True
photomaker_path = hf_hub_download(
    repo_id="TencentARC/PhotoMaker", filename="photomaker-v1.bin", repo_type="model")
device = "cuda"

# Functions


def setup_seed(seed):
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True


def set_text_unfinished():
    return gr.update(visible=True, value="<h3>(Not Finished) Generating ··· The intermediate results will be shown.</h3>")


def set_text_finished():
    return gr.update(visible=True, value="<h3>Generation Finished</h3>")


class SpatialAttnProcessor2_0(torch.nn.Module):
    r"""
    Attention processor for IP-Adapter for PyTorch 2.0.
    Args:
        hidden_size (`int`):
            The hidden size of the attention layer.
        cross_attention_dim (`int`):
            The number of channels in the `encoder_hidden_states`.
        text_context_len (`int`, defaults to 77):
            The context length of the text features.
        scale (`float`, defaults to 1.0):
            the weight scale of image prompt.
    """

    def __init__(self, hidden_size=None, cross_attention_dim=None, id_length=4, device="cuda", dtype=torch.float16):
        super().__init__()
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError(
                "AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
        self.device = device
        self.dtype = dtype
        self.hidden_size = hidden_size
        self.cross_attention_dim = cross_attention_dim
        self.total_length = id_length + 1
        self.id_length = id_length
        self.id_bank = {}

    def __call__(
            self,
            attn,
            hidden_states,
            encoder_hidden_states=None,
            attention_mask=None,
            temb=None):
        # un_cond_hidden_states, cond_hidden_states = hidden_states.chunk(2)
        # un_cond_hidden_states = self.__call2__(attn, un_cond_hidden_states,encoder_hidden_states,attention_mask,temb)
        # Generate a random number between 0 and 1
        global total_count, attn_count, cur_step, mask1024, mask4096
        global sa32, sa64
        global write
        global height, width
        global num_steps
        if write:
            # print(f"write:{cur_step}")
            self.id_bank[cur_step] = [
                hidden_states[:self.id_length], hidden_states[self.id_length:]]
        else:
            encoder_hidden_states = torch.cat((self.id_bank[cur_step][0].to(
                self.device), hidden_states[:1], self.id_bank[cur_step][1].to(self.device), hidden_states[1:]))
        # Check whether the random number is greater than the threshold
        if cur_step <= 1:
            hidden_states = self.__call2__(
                attn, hidden_states, None, attention_mask, temb)
        else:  # 256 1024 4096
            random_number = random.random()
            if cur_step < 0.4 * num_steps:
                rand_num = 0.3
            else:
                rand_num = 0.1
            # print(f"hidden state shape {hidden_states.shape[1]}")
            if random_number > rand_num:
                # print("mask shape",mask1024.shape,mask4096.shape)
                if not write:
                    if hidden_states.shape[1] == (height // 32) * (width // 32):
                        attention_mask = mask1024[mask1024.shape[0] // self.total_length * self.id_length:]
                    else:
                        attention_mask = mask4096[mask4096.shape[0] // self.total_length * self.id_length:]
                else:
                    # print(self.total_length,self.id_length,hidden_states.shape,(height//32) * (width//32))
                    if hidden_states.shape[1] == (height // 32) * (width // 32):
                        attention_mask = mask1024[:mask1024.shape[0] // self.total_length * self.id_length, :mask1024.shape[0] // self.total_length * self.id_length]
                    else:
                        attention_mask = mask4096[:mask4096.shape[0] // self.total_length * self.id_length, :mask4096.shape[0] // self.total_length * self.id_length]
                # print(attention_mask.shape)
                # print("before attention",hidden_states.shape,attention_mask.shape,encoder_hidden_states.shape if encoder_hidden_states is not None else "None")
                hidden_states = self.__call1__(
                    attn, hidden_states, encoder_hidden_states, attention_mask, temb)
            else:
                hidden_states = self.__call2__(
                    attn, hidden_states, None, attention_mask, temb)
        attn_count += 1
        if attn_count == total_count:
            attn_count = 0
            cur_step += 1
            mask1024, mask4096 = cal_attn_mask_xl(
                self.total_length, self.id_length, sa32, sa64, height, width, device=self.device, dtype=self.dtype)

        return hidden_states

    def __call1__(
            self,
            attn,
            hidden_states,
            encoder_hidden_states=None,
            attention_mask=None,
            temb=None,
    ):
        # print("hidden state shape",hidden_states.shape,self.id_length)
        residual = hidden_states
        # if encoder_hidden_states is not None:
        #     raise Exception("not implement")
        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)
        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            total_batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(
                total_batch_size, channel, height * width).transpose(1, 2)
        total_batch_size, nums_token, channel = hidden_states.shape
        img_nums = total_batch_size // 2
        hidden_states = hidden_states.view(-1, img_nums, nums_token, channel).reshape(-1, img_nums * nums_token, channel)

        batch_size, sequence_length, _ = hidden_states.shape

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states  # B, N, C
        else:
            encoder_hidden_states = encoder_hidden_states.view(
                -1, self.id_length + 1, nums_token, channel).reshape(-1, (self.id_length + 1) * nums_token, channel)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        # print(key.shape,value.shape,query.shape,attention_mask.shape)
        # the output of sdp = (batch, num_heads, seq_len, head_dim)
        # TODO: add support for attn.scale when we move to Torch 2.1
        # print(query.shape,key.shape,value.shape,attention_mask.shape)
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(
            total_batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        # if input_ndim == 4:
        #     tile_hidden_states = tile_hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        # if attn.residual_connection:
        #     tile_hidden_states = tile_hidden_states + residual

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(
                total_batch_size, channel, height, width)
        if attn.residual_connection:
            hidden_states = hidden_states + residual
        hidden_states = hidden_states / attn.rescale_output_factor
        # print(hidden_states.shape)
        return hidden_states

    def __call2__(
            self,
            attn,
            hidden_states,
            encoder_hidden_states=None,
            attention_mask=None,
            temb=None):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(
                batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, channel = hidden_states.shape
        # print(hidden_states.shape)
        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(
                attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects attention_mask shape to be
            # (batch, heads, source_length, target_length)
            attention_mask = attention_mask.view(
                batch_size, attn.heads, -1, attention_mask.shape[-1])

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states  # B, N, C
        else:
            encoder_hidden_states = encoder_hidden_states.view(
                -1, self.id_length + 1, sequence_length, channel).reshape(-1, (self.id_length + 1) * sequence_length, channel)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        # the output of sdp = (batch, num_heads, seq_len, head_dim)
        # TODO: add support for attn.scale when we move to Torch 2.1
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(
            batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(
                -1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


def set_attention_processor(unet, id_length, is_ipadapter=False):
    global total_count
    total_count = 0
    attn_procs = {}
    for name in unet.attn_processors.keys():
        cross_attention_dim = None if name.endswith(
            "attn1.processor") else unet.config.cross_attention_dim
        if cross_attention_dim is None:
            if name.startswith("up_blocks"):
                attn_procs[name] = SpatialAttnProcessor2_0(id_length=id_length)
                total_count += 1
            else:
                attn_procs[name] = AttnProcessor()
        else:
            attn_procs[name] = AttnProcessor()

    unet.set_attn_processor(copy.deepcopy(attn_procs))
    print("Successfully loaded paired self-attention")
    print(f"Number of processors: {total_count}")


attn_count = 0
total_count = 0
cur_step = 0
id_length = 4
total_length = 5
cur_model_type = ""
device = "cuda"
attn_procs = {}
write = False
sa32 = 0.5
sa64 = 0.5
height = 768
width = 768


def swap_to_gallery(images):
    return gr.update(value=images, visible=True), gr.update(visible=True), gr.update(visible=False)


def upload_example_to_gallery(images, prompt, style, negative_prompt):
    return gr.update(value=images, visible=True), gr.update(visible=True), gr.update(visible=False)


def remove_back_to_files():
    return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)


def remove_tips():
    return gr.update(visible=False)


def apply_style_positive(style_name: str, positive: str):
    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    return p.replace("{prompt}", positive)


def apply_style(style_name: str, positives: list, negative: str = ""):
    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    return [p.replace("{prompt}", positive) for positive in positives], n + ' ' + negative


def change_visiale_by_model_type(_model_type):
    if _model_type == "Only Using Textual Description":
        return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
    elif _model_type == "Using Ref Images":
        return gr.update(visible=True), gr.update(visible=True), gr.update(visible=False)
    else:
        raise ValueError("Invalid model type", _model_type)


@spaces.GPU(duration=120)
def process_generation(_sd_type, _model_type, _upload_images, _num_steps, style_name, _Ip_Adapter_Strength, _style_strength_ratio, guidance_scale, seed_, sa32_, sa64_, id_length_, general_prompt, negative_prompt, prompt_array, G_height, G_width, _comic_type):
    global sa32, sa64, id_length, total_length, attn_procs, unet, cur_model_type, device, num_steps, write, cur_step, attn_count, height, width, pipe2, pipe4, sd_model_path, models_dict

    _model_type = "Photomaker" if _model_type == "Using Ref Images" else "original"
    if _model_type == "Photomaker" and "img" not in general_prompt:
        raise gr.Error(
            "Please add the trigger word 'img' behind the class word you want to customize, such as: man img or woman img")
    if _upload_images is None and _model_type != "original":
        raise gr.Error("Cannot find any input face image!")
    if len(prompt_array.splitlines()) > 10:
        raise gr.Error(
            f"No more than 10 prompts in Hugging Face demo for speed! But found {len(prompt_array.splitlines())} prompts!")

    height = G_height
    width = G_width
    sd_model_path = models_dict[_sd_type]
    num_steps = _num_steps

    if style_name == "(No style)":
        sd_model_path = models_dict["RealVision"]

    if _model_type == "original":
        pipe = StableDiffusionXLPipeline.from_pretrained(
            sd_model_path, torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
        set_attention_processor(pipe.unet, id_length_, is_ipadapter=False)
    elif _model_type == "Photomaker":
        if _sd_type != "RealVision" and style_name != "(No style)":
            pipe = pipe2.to(device)
            pipe.id_encoder.to(device)
            set_attention_processor(pipe.unet, id_length_, is_ipadapter=False)
        else:
            pipe = pipe4.to(device)
            pipe.id_encoder.to(device)
            set_attention_processor(pipe.unet, id_length_, is_ipadapter=False)
    else:
        raise NotImplementedError(
            "You should choose between original and Photomaker!", f"But you chose {_model_type}")

    pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
    pipe.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
    cur_model_type = _sd_type + "-" + _model_type + str(id_length_)

    if _model_type != "original":
        input_id_images = [load_image(img) for img in _upload_images]

    prompts = prompt_array.splitlines()
    start_merge_step = int(float(_style_strength_ratio) / 100 * _num_steps)
    if start_merge_step > 30:
        start_merge_step = 30
    print(f"start_merge_step: {start_merge_step}")

    generator = torch.Generator(device="cuda").manual_seed(seed_)
    sa32, sa64 = sa32_, sa64_
    id_length = id_length_
    clipped_prompts = prompts[:]
    prompts = [general_prompt + "," + prompt if "[NC]" not in prompt else prompt.replace(
        "[NC]", "") for prompt in clipped_prompts]
    prompts = [prompt.rpartition(
        '#')[0] if "#" in prompt else prompt for prompt in prompts]
    print(prompts)

    id_prompts = prompts[:id_length]
    real_prompts = prompts[id_length:]
    torch.cuda.empty_cache()
    write = True
    cur_step = 0

    attn_count = 0
    id_prompts, negative_prompt = apply_style(
        style_name, id_prompts, negative_prompt)
    setup_seed(seed_)
    total_results = []

    if _model_type == "original":
        id_images = pipe(id_prompts, num_inference_steps=_num_steps, guidance_scale=guidance_scale,
                         height=height, width=width, negative_prompt=negative_prompt, generator=generator).images
    elif _model_type == "Photomaker":
        id_images = pipe(id_prompts, input_id_images=input_id_images, num_inference_steps=_num_steps, guidance_scale=guidance_scale,
                         start_merge_step=start_merge_step, height=height, width=width, negative_prompt=negative_prompt, generator=generator).images
    else:
        raise NotImplementedError(
            "You should choose between original and Photomaker!", f"But you chose {_model_type}")

    total_results = id_images + total_results
    yield total_results

    real_images = []
    write = False
    for real_prompt in real_prompts:
        setup_seed(seed_)
        cur_step = 0
        real_prompt = apply_style_positive(style_name, real_prompt)
        if _model_type == "original":
            real_images.append(pipe(real_prompt, num_inference_steps=_num_steps, guidance_scale=guidance_scale,
                                    height=height, width=width, negative_prompt=negative_prompt, generator=generator).images[0])
        elif _model_type == "Photomaker":
            real_images.append(pipe(real_prompt, input_id_images=input_id_images, num_inference_steps=_num_steps, guidance_scale=guidance_scale,
                                    start_merge_step=start_merge_step, height=height, width=width, negative_prompt=negative_prompt, generator=generator).images[0])
        else:
            raise NotImplementedError(
                "You should choose between original and Photomaker!", f"But you chose {_model_type}")
        total_results = [real_images[-1]] + total_results
        yield total_results

    if _comic_type != "No typesetting (default)":
        from PIL import ImageFont
        captions = prompt_array.splitlines()
        captions = [caption.replace("[NC]", "") for caption in captions]
        captions = [caption.split(
            '#')[-1] if "#" in caption else caption for caption in captions]
        total_results = get_comic(id_images + real_images, _comic_type, captions=captions,
                                  font=ImageFont.truetype("./storyDiffusion/fonts/Inkfree.ttf", int(45))) + total_results

    if _model_type == "Photomaker":
        pipe = pipe2.to("cpu")
        pipe.id_encoder.to("cpu")
        set_attention_processor(pipe.unet, id_length_, is_ipadapter=False)

    yield total_results


# Initialize pipelines
pipe2 = PhotoMakerStableDiffusionXLPipeline.from_pretrained(
    models_dict["Unstable"], torch_dtype=torch.float16, use_safetensors=False)
pipe2 = pipe2.to("cpu")
pipe2.load_photomaker_adapter(
    os.path.dirname(photomaker_path),
    subfolder="",
    weight_name=os.path.basename(photomaker_path),
    trigger_word="img"
)
pipe2 = pipe2.to("cpu")
pipe2.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
pipe2.fuse_lora()

pipe4 = PhotoMakerStableDiffusionXLPipeline.from_pretrained(
    models_dict["RealVision"], torch_dtype=torch.float16, use_safetensors=True)
pipe4 = pipe4.to("cpu")
pipe4.load_photomaker_adapter(
    os.path.dirname(photomaker_path),
    subfolder="",
    weight_name=os.path.basename(photomaker_path),
    trigger_word="img"
)
pipe4 = pipe4.to("cpu")
pipe4.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
pipe4.fuse_lora()


def story_generation_ui():
    with gr.Row():
        with gr.Group(elem_id="main-image"):
            prompts = []
            colors = []
            with gr.Column(visible=True) as gen_prompt_vis:
                sd_type = gr.Dropdown(choices=list(models_dict.keys()),
                                      value="Unstable", label="sd_type", info="Select pretrained model")
                model_type = gr.Radio(["Only Using Textual Description", "Using Ref Images"], label="model_type",
                                      value="Only Using Textual Description", info="Control type of the Character")
                with gr.Group(visible=False) as control_image_input:
                    files = gr.Files(
                        label="Drag (Select) 1 or more photos of your face",
                        file_types=["image"],
                    )
                    uploaded_files = gr.Gallery(
                        label="Your images", visible=False, columns=5, rows=1, height=200)
                    with gr.Column(visible=False) as clear_button:
                        remove_and_reupload = gr.ClearButton(
                            value="Remove and upload new ones", components=files, size="sm")
                general_prompt = gr.Textbox(
                    value='', label="(1) Textual Description for Character", interactive=True)
                negative_prompt = gr.Textbox(
                    value='', label="(2) Negative_prompt", interactive=True)
                style = gr.Dropdown(
                    label="Style template", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
                prompt_array = gr.Textbox(
                    lines=3, value='', label="(3) Comic Description (each line corresponds to a frame).", interactive=True)
                with gr.Accordion("(4) Tune the hyperparameters", open=True):
                    sa32_ = gr.Slider(label="(The degree of Paired Attention at 32 x 32 self-attention layers)",
                                      minimum=0, maximum=1., value=0.7, step=0.1)
                    sa64_ = gr.Slider(label="(The degree of Paired Attention at 64 x 64 self-attention layers)",
                                      minimum=0, maximum=1., value=0.7, step=0.1)
                    id_length_ = gr.Slider(
                        label="Number of id images in total images", minimum=2, maximum=4, value=3, step=1)
                    seed_ = gr.Slider(label="Seed", minimum=-1,
                                      maximum=MAX_SEED, value=0, step=1)
                    num_steps = gr.Slider(
                        label="Number of sample steps",
                        minimum=25,
                        maximum=50,
                        step=1,
                        value=50,
                    )
                    G_height = gr.Slider(
                        label="height",
                        minimum=256,
                        maximum=1024,
                        step=32,
                        value=1024,
                    )
                    G_width = gr.Slider(
                        label="width",
                        minimum=256,
                        maximum=1024,
                        step=32,
                        value=1024,
                    )
                    comic_type = gr.Radio(["No typesetting (default)", "Four Pannel", "Classic Comic Style"],
                                          value="Classic Comic Style", label="Typesetting Style", info="Select the typesetting style ")
                    guidance_scale = gr.Slider(
                        label="Guidance scale",
                        minimum=0.1,
                        maximum=10.0,
                        step=0.1,
                        value=5,
                    )
                    style_strength_ratio = gr.Slider(
                        label="Style strength of Ref Image (%)",
                        minimum=15,
                        maximum=50,
                        step=1,
                        value=20,
                        visible=False
                    )
                    Ip_Adapter_Strength = gr.Slider(
                        label="Ip_Adapter_Strength",
                        minimum=0,
                        maximum=1,
                        step=0.1,
                        value=0.5,
                        visible=False
                    )
                final_run_btn = gr.Button("Generate ! 😺")

        with gr.Column():
            out_image = gr.Gallery(label="Result", columns=2, height='auto')
            generated_information = gr.Markdown(
                label="Generation Details", value="", visible=False)

    model_type.change(fn=change_visiale_by_model_type, inputs=model_type,
                      outputs=[control_image_input, style_strength_ratio, Ip_Adapter_Strength])
    files.upload(fn=swap_to_gallery, inputs=files,
                 outputs=[uploaded_files, clear_button, files])
    remove_and_reupload.click(fn=remove_back_to_files,
                              outputs=[uploaded_files, clear_button, files])

    final_run_btn.click(fn=set_text_unfinished, outputs=generated_information
                        ).then(process_generation,
                               inputs=[sd_type, model_type, files, num_steps, style, Ip_Adapter_Strength, style_strength_ratio, guidance_scale, seed_, sa32_, sa64_, id_length_, general_prompt, negative_prompt, prompt_array, G_height, G_width, comic_type],
                               outputs=out_image
                        ).then(fn=set_text_finished, outputs=generated_information)

    gr.Examples(
        examples=get_examples(),
        inputs=[seed_, sa32_, sa64_, id_length_, general_prompt, negative_prompt,
                prompt_array, style, model_type, files, G_height, G_width],
        label='😺 Examples 😺',
    )
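
Note (not part of the commit): process_generation() rewrites each frame line before diffusion. A line tagged "[NC]" drops the character description; text after "#" is stripped from the diffusion prompt but kept as the comic caption. A minimal sketch of that rewriting for a single line, using an illustrative frame from the examples:

    # Illustrative values only; the logic mirrors the list comprehensions above.
    general_prompt = "a man, wearing black suit"
    line = "[NC] A house in the forest, at night #Suddenly, he discovers the treasure house!"

    prompt = line.replace("[NC]", "") if "[NC]" in line else general_prompt + "," + line
    prompt = prompt.rpartition('#')[0] if "#" in prompt else prompt  # text sent to the pipeline
    caption = line.split('#')[-1] if "#" in line else line            # text drawn under the panel
    print(prompt)   # " A house in the forest, at night "
    print(caption)  # "Suddenly, he discovers the treasure house!"
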
requirements.txt
CHANGED
@@ -21,8 +21,9 @@ plyfile
 xformers
 git+https://github.com/NVlabs/nvdiffrast/
 huggingface-hub
-
+numpy
 httpx
 flask
 pillow
-safetensors
+safetensors
+peft
storyDiffusion/fonts/Inkfree.ttf
ADDED
Binary file (41.2 kB)
storyDiffusion/utils/__init__.py
ADDED
@@ -0,0 +1,7 @@
from .model import PhotoMakerIDEncoder
from .pipeline import PhotoMakerStableDiffusionXLPipeline

__all__ = [
    "PhotoMakerIDEncoder",
    "PhotoMakerStableDiffusionXLPipeline",
]
storyDiffusion/utils/gradio_utils.py
ADDED
@@ -0,0 +1,416 @@
1 |
+
import torch
|
2 |
+
import random
|
3 |
+
import torch.nn as nn
|
4 |
+
import torch.nn.functional as F
|
5 |
+
|
6 |
+
|
7 |
+
class SpatialAttnProcessor2_0(torch.nn.Module):
|
8 |
+
r"""
|
9 |
+
Attention processor for IP-Adapater for PyTorch 2.0.
|
10 |
+
Args:
|
11 |
+
hidden_size (`int`):
|
12 |
+
The hidden size of the attention layer.
|
13 |
+
cross_attention_dim (`int`):
|
14 |
+
The number of channels in the `encoder_hidden_states`.
|
15 |
+
text_context_len (`int`, defaults to 77):
|
16 |
+
The context length of the text features.
|
17 |
+
scale (`float`, defaults to 1.0):
|
18 |
+
the weight scale of image prompt.
|
19 |
+
"""
|
20 |
+
|
21 |
+
def __init__(self, hidden_size = None, cross_attention_dim=None,id_length = 4,device = "cuda",dtype = torch.float16):
|
22 |
+
super().__init__()
|
23 |
+
if not hasattr(F, "scaled_dot_product_attention"):
|
24 |
+
raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
|
25 |
+
self.device = device
|
26 |
+
self.dtype = dtype
|
27 |
+
self.hidden_size = hidden_size
|
28 |
+
self.cross_attention_dim = cross_attention_dim
|
29 |
+
self.total_length = id_length + 1
|
30 |
+
self.id_length = id_length
|
31 |
+
self.id_bank = {}
|
32 |
+
|
33 |
+
def __call__(
|
34 |
+
self,
|
35 |
+
attn,
|
36 |
+
hidden_states,
|
37 |
+
encoder_hidden_states=None,
|
38 |
+
attention_mask=None,
|
39 |
+
temb=None):
|
40 |
+
# un_cond_hidden_states, cond_hidden_states = hidden_states.chunk(2)
|
41 |
+
# un_cond_hidden_states = self.__call2__(attn, un_cond_hidden_states,encoder_hidden_states,attention_mask,temb)
|
42 |
+
# 生成一个0到1之间的随机数
|
43 |
+
global total_count,attn_count,cur_step,mask256,mask1024,mask4096
|
44 |
+
global sa16, sa32, sa64
|
45 |
+
global write
|
46 |
+
if write:
|
47 |
+
self.id_bank[cur_step] = [hidden_states[:self.id_length], hidden_states[self.id_length:]]
|
48 |
+
else:
|
49 |
+
encoder_hidden_states = torch.cat(self.id_bank[cur_step][0],hidden_states[:1],self.id_bank[cur_step][1],hidden_states[1:])
|
50 |
+
# 判断随机数是否大于0.5
|
51 |
+
if cur_step <5:
|
52 |
+
hidden_states = self.__call2__(attn, hidden_states,encoder_hidden_states,attention_mask,temb)
|
53 |
+
else: # 256 1024 4096
|
54 |
+
random_number = random.random()
|
55 |
+
if cur_step <20:
|
56 |
+
rand_num = 0.3
|
57 |
+
else:
|
58 |
+
rand_num = 0.1
|
59 |
+
if random_number > rand_num:
|
60 |
+
if not write:
|
61 |
+
if hidden_states.shape[1] == 32* 32:
|
62 |
+
attention_mask = mask1024[mask1024.shape[0] // self.total_length * self.id_length:]
|
63 |
+
elif hidden_states.shape[1] ==16*16:
|
64 |
+
attention_mask = mask256[mask256.shape[0] // self.total_length * self.id_length:]
|
65 |
+
else:
|
66 |
+
attention_mask = mask4096[mask4096.shape[0] // self.total_length * self.id_length:]
|
67 |
+
else:
|
68 |
+
if hidden_states.shape[1] == 32* 32:
|
69 |
+
attention_mask = mask1024[:mask1024.shape[0] // self.total_length * self.id_length]
|
70 |
+
elif hidden_states.shape[1] ==16*16:
|
71 |
+
attention_mask = mask256[:mask256.shape[0] // self.total_length * self.id_length]
|
72 |
+
else:
|
73 |
+
attention_mask = mask4096[:mask4096.shape[0] // self.total_length * self.id_length]
|
74 |
+
hidden_states = self.__call1__(attn, hidden_states,encoder_hidden_states,attention_mask,temb)
|
75 |
+
else:
|
76 |
+
hidden_states = self.__call2__(attn, hidden_states,None,attention_mask,temb)
|
77 |
+
attn_count +=1
|
78 |
+
if attn_count == total_count:
|
79 |
+
attn_count = 0
|
80 |
+
cur_step += 1
|
81 |
+
mask256,mask1024,mask4096 = cal_attn_mask(self.total_length,self.id_length,sa16,sa32,sa64, device=self.device, dtype= self.dtype)
|
82 |
+
|
83 |
+
return hidden_states
|
84 |
+
def __call1__(
|
85 |
+
self,
|
86 |
+
attn,
|
87 |
+
hidden_states,
|
88 |
+
encoder_hidden_states=None,
|
89 |
+
attention_mask=None,
|
90 |
+
temb=None,
|
91 |
+
):
|
92 |
+
residual = hidden_states
|
93 |
+
if encoder_hidden_states is not None:
|
94 |
+
raise Exception("not implement")
|
95 |
+
if attn.spatial_norm is not None:
|
96 |
+
hidden_states = attn.spatial_norm(hidden_states, temb)
|
97 |
+
input_ndim = hidden_states.ndim
|
98 |
+
|
99 |
+
if input_ndim == 4:
|
100 |
+
total_batch_size, channel, height, width = hidden_states.shape
|
101 |
+
hidden_states = hidden_states.view(total_batch_size, channel, height * width).transpose(1, 2)
|
102 |
+
total_batch_size,nums_token,channel = hidden_states.shape
|
103 |
+
img_nums = total_batch_size//2
|
104 |
+
hidden_states = hidden_states.view(-1,img_nums,nums_token,channel).reshape(-1,img_nums * nums_token,channel)
|
105 |
+
|
106 |
+
batch_size, sequence_length, _ = hidden_states.shape
|
107 |
+
|
108 |
+
if attn.group_norm is not None:
|
109 |
+
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
|
110 |
+
|
111 |
+
query = attn.to_q(hidden_states)
|
112 |
+
|
113 |
+
if encoder_hidden_states is None:
|
114 |
+
encoder_hidden_states = hidden_states # B, N, C
|
115 |
+
else:
|
116 |
+
encoder_hidden_states = encoder_hidden_states.view(-1,self.id_length+1,nums_token,channel).reshape(-1,(self.id_length+1) * nums_token,channel)
|
117 |
+
|
118 |
+
key = attn.to_k(encoder_hidden_states)
|
119 |
+
value = attn.to_v(encoder_hidden_states)
|
120 |
+
|
121 |
+
|
122 |
+
inner_dim = key.shape[-1]
|
123 |
+
head_dim = inner_dim // attn.heads
|
124 |
+
|
125 |
+
query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
126 |
+
|
127 |
+
key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
128 |
+
value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
129 |
+
|
130 |
+
# the output of sdp = (batch, num_heads, seq_len, head_dim)
|
131 |
+
# TODO: add support for attn.scale when we move to Torch 2.1
|
132 |
+
hidden_states = F.scaled_dot_product_attention(
|
133 |
+
query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
|
134 |
+
)
|
135 |
+
|
136 |
+
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
|
137 |
+
hidden_states = hidden_states.to(query.dtype)
|
138 |
+
|
139 |
+
|
140 |
+
|
141 |
+
# linear proj
|
142 |
+
hidden_states = attn.to_out[0](hidden_states)
|
143 |
+
# dropout
|
144 |
+
hidden_states = attn.to_out[1](hidden_states)
|
145 |
+
|
146 |
+
# if input_ndim == 4:
|
147 |
+
# tile_hidden_states = tile_hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
148 |
+
|
149 |
+
# if attn.residual_connection:
|
150 |
+
# tile_hidden_states = tile_hidden_states + residual
|
151 |
+
|
152 |
+
if input_ndim == 4:
|
153 |
+
hidden_states = hidden_states.transpose(-1, -2).reshape(total_batch_size, channel, height, width)
|
154 |
+
if attn.residual_connection:
|
155 |
+
hidden_states = hidden_states + residual
|
156 |
+
hidden_states = hidden_states / attn.rescale_output_factor
|
157 |
+
|
158 |
+
return hidden_states
|
159 |
+
def __call2__(
|
160 |
+
self,
|
161 |
+
attn,
|
162 |
+
hidden_states,
|
163 |
+
encoder_hidden_states=None,
|
164 |
+
attention_mask=None,
|
165 |
+
temb=None):
|
166 |
+
residual = hidden_states
|
167 |
+
|
168 |
+
if attn.spatial_norm is not None:
|
169 |
+
hidden_states = attn.spatial_norm(hidden_states, temb)
|
170 |
+
|
171 |
+
input_ndim = hidden_states.ndim
|
172 |
+
|
173 |
+
if input_ndim == 4:
|
174 |
+
batch_size, channel, height, width = hidden_states.shape
|
175 |
+
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
|
176 |
+
|
177 |
+
batch_size, sequence_length, _ = (
|
178 |
+
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
179 |
+
)
|
180 |
+
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
181 |
+
|
182 |
+
if attn.group_norm is not None:
|
183 |
+
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
|
184 |
+
|
185 |
+
query = attn.to_q(hidden_states)
|
186 |
+
|
187 |
+
if encoder_hidden_states is None:
|
188 |
+
encoder_hidden_states = hidden_states
|
189 |
+
elif attn.norm_cross:
|
190 |
+
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
|
191 |
+
|
192 |
+
key = attn.to_k(encoder_hidden_states)
|
193 |
+
value = attn.to_v(encoder_hidden_states)
|
194 |
+
|
195 |
+
query = attn.head_to_batch_dim(query)
|
196 |
+
key = attn.head_to_batch_dim(key)
|
197 |
+
value = attn.head_to_batch_dim(value)
|
198 |
+
|
199 |
+
attention_probs = attn.get_attention_scores(query, key, attention_mask)
|
200 |
+
hidden_states = torch.bmm(attention_probs, value)
|
201 |
+
hidden_states = attn.batch_to_head_dim(hidden_states)
|
202 |
+
|
203 |
+
# linear proj
|
204 |
+
hidden_states = attn.to_out[0](hidden_states)
|
205 |
+
# dropout
|
206 |
+
hidden_states = attn.to_out[1](hidden_states)
|
207 |
+
|
208 |
+
if input_ndim == 4:
|
209 |
+
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
210 |
+
|
211 |
+
if attn.residual_connection:
|
212 |
+
hidden_states = hidden_states + residual
|
213 |
+
|
214 |
+
hidden_states = hidden_states / attn.rescale_output_factor
|
215 |
+
|
216 |
+
return hidden_states
|
217 |
+
|
218 |
+
|
219 |
+
def cal_attn_mask(total_length,id_length,sa16,sa32,sa64,device="cuda",dtype= torch.float16):
|
220 |
+
bool_matrix256 = torch.rand((1, total_length * 256),device = device,dtype = dtype) < sa16
|
221 |
+
bool_matrix1024 = torch.rand((1, total_length * 1024),device = device,dtype = dtype) < sa32
|
222 |
+
bool_matrix4096 = torch.rand((1, total_length * 4096),device = device,dtype = dtype) < sa64
|
223 |
+
bool_matrix256 = bool_matrix256.repeat(total_length,1)
|
224 |
+
bool_matrix1024 = bool_matrix1024.repeat(total_length,1)
|
225 |
+
bool_matrix4096 = bool_matrix4096.repeat(total_length,1)
|
226 |
+
for i in range(total_length):
|
227 |
+
bool_matrix256[i:i+1,id_length*256:] = False
|
228 |
+
bool_matrix1024[i:i+1,id_length*1024:] = False
|
229 |
+
bool_matrix4096[i:i+1,id_length*4096:] = False
|
230 |
+
bool_matrix256[i:i+1,i*256:(i+1)*256] = True
|
231 |
+
bool_matrix1024[i:i+1,i*1024:(i+1)*1024] = True
|
232 |
+
bool_matrix4096[i:i+1,i*4096:(i+1)*4096] = True
|
233 |
+
mask256 = bool_matrix256.unsqueeze(1).repeat(1,256,1).reshape(-1,total_length * 256)
|
234 |
+
mask1024 = bool_matrix1024.unsqueeze(1).repeat(1,1024,1).reshape(-1,total_length * 1024)
|
235 |
+
mask4096 = bool_matrix4096.unsqueeze(1).repeat(1,4096,1).reshape(-1,total_length * 4096)
|
236 |
+
return mask256,mask1024,mask4096
|
237 |
+
|
238 |
+
def cal_attn_mask_xl(total_length,id_length,sa32,sa64,height,width,device="cuda",dtype= torch.float16):
|
239 |
+
nums_1024 = (height // 32) * (width // 32)
|
240 |
+
nums_4096 = (height // 16) * (width // 16)
|
241 |
+
bool_matrix1024 = torch.rand((1, total_length * nums_1024),device = device,dtype = dtype) < sa32
|
242 |
+
bool_matrix4096 = torch.rand((1, total_length * nums_4096),device = device,dtype = dtype) < sa64
|
243 |
+
bool_matrix1024 = bool_matrix1024.repeat(total_length,1)
|
244 |
+
bool_matrix4096 = bool_matrix4096.repeat(total_length,1)
|
245 |
+
for i in range(total_length):
|
246 |
+
bool_matrix1024[i:i+1,id_length*nums_1024:] = False
|
247 |
+
bool_matrix4096[i:i+1,id_length*nums_4096:] = False
|
248 |
+
bool_matrix1024[i:i+1,i*nums_1024:(i+1)*nums_1024] = True
|
249 |
+
bool_matrix4096[i:i+1,i*nums_4096:(i+1)*nums_4096] = True
|
250 |
+
mask1024 = bool_matrix1024.unsqueeze(1).repeat(1,nums_1024,1).reshape(-1,total_length * nums_1024)
|
251 |
+
mask4096 = bool_matrix4096.unsqueeze(1).repeat(1,nums_4096,1).reshape(-1,total_length * nums_4096)
|
252 |
+
return mask1024,mask4096
|
253 |
+
|
254 |
+
|
255 |
+
class AttnProcessor(nn.Module):
|
256 |
+
r"""
|
257 |
+
Default processor for performing attention-related computations.
|
258 |
+
"""
|
259 |
+
def __init__(
|
260 |
+
self,
|
261 |
+
hidden_size=None,
|
262 |
+
cross_attention_dim=None,
|
263 |
+
):
|
264 |
+
super().__init__()
|
265 |
+
|
266 |
+
def __call__(
|
267 |
+
self,
|
268 |
+
attn,
|
269 |
+
hidden_states,
|
270 |
+
encoder_hidden_states=None,
|
271 |
+
attention_mask=None,
|
272 |
+
temb=None,
|
273 |
+
):
|
274 |
+
residual = hidden_states
|
275 |
+
|
276 |
+
if attn.spatial_norm is not None:
|
277 |
+
hidden_states = attn.spatial_norm(hidden_states, temb)
|
278 |
+
|
279 |
+
input_ndim = hidden_states.ndim
|
280 |
+
|
281 |
+
if input_ndim == 4:
|
282 |
+
batch_size, channel, height, width = hidden_states.shape
|
283 |
+
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
|
284 |
+
|
285 |
+
batch_size, sequence_length, _ = (
|
286 |
+
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
287 |
+
)
|
288 |
+
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
289 |
+
|
290 |
+
if attn.group_norm is not None:
|
291 |
+
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
|
292 |
+
|
293 |
+
query = attn.to_q(hidden_states)
|
294 |
+
|
295 |
+
if encoder_hidden_states is None:
|
296 |
+
encoder_hidden_states = hidden_states
|
297 |
+
elif attn.norm_cross:
|
298 |
+
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
|
299 |
+
|
300 |
+
key = attn.to_k(encoder_hidden_states)
|
301 |
+
value = attn.to_v(encoder_hidden_states)
|
302 |
+
|
303 |
+
query = attn.head_to_batch_dim(query)
|
304 |
+
key = attn.head_to_batch_dim(key)
|
305 |
+
value = attn.head_to_batch_dim(value)
|
306 |
+
|
307 |
+
attention_probs = attn.get_attention_scores(query, key, attention_mask)
|
308 |
+
hidden_states = torch.bmm(attention_probs, value)
|
309 |
+
hidden_states = attn.batch_to_head_dim(hidden_states)
|
310 |
+
|
311 |
+
# linear proj
|
312 |
+
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


class AttnProcessor2_0(torch.nn.Module):
    r"""
    Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
    """
    def __init__(
        self,
        hidden_size=None,
        cross_attention_dim=None,
    ):
        super().__init__()
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")

    def __call__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
    ):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )

        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects attention_mask shape to be
            # (batch, heads, source_length, target_length)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        # the output of sdp = (batch, num_heads, seq_len, head_dim)
        # TODO: add support for attn.scale when we move to Torch 2.1
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


def is_torch2_available():
    return hasattr(F, "scaled_dot_product_attention")
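For context (not part of the commit), a minimal sketch of how processors like the ones above are typically registered on a diffusers UNet. It assumes a loaded SDXL pipeline object named `pipe` and the `AttnProcessor` class defined earlier in this same file; `AttnProcessor2_0` is chosen only when PyTorch 2.x is available.

import torch.nn.functional as F

def set_processors(unet):
    # same capability check as is_torch2_available()
    use_sdp = hasattr(F, "scaled_dot_product_attention")
    procs = {}
    for name in unet.attn_processors.keys():
        procs[name] = AttnProcessor2_0() if use_sdp else AttnProcessor()
    unet.set_attn_processor(procs)

# e.g. set_processors(pipe.unet)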
storyDiffusion/utils/model.py
ADDED
@@ -0,0 +1,113 @@
# Merge image encoder and fuse module to create an ID Encoder
# send multiple ID images, we can directly obtain the updated text encoder containing a stacked ID embedding

import torch
import torch.nn as nn
from transformers.models.clip.modeling_clip import CLIPVisionModelWithProjection
from transformers.models.clip.configuration_clip import CLIPVisionConfig
from transformers import PretrainedConfig

VISION_CONFIG_DICT = {
    "hidden_size": 1024,
    "intermediate_size": 4096,
    "num_attention_heads": 16,
    "num_hidden_layers": 24,
    "patch_size": 14,
    "projection_dim": 768
}

class MLP(nn.Module):
    def __init__(self, in_dim, out_dim, hidden_dim, use_residual=True):
        super().__init__()
        if use_residual:
            assert in_dim == out_dim
        self.layernorm = nn.LayerNorm(in_dim)
        self.fc1 = nn.Linear(in_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, out_dim)
        self.use_residual = use_residual
        self.act_fn = nn.GELU()

    def forward(self, x):
        residual = x
        x = self.layernorm(x)
        x = self.fc1(x)
        x = self.act_fn(x)
        x = self.fc2(x)
        if self.use_residual:
            x = x + residual
        return x


class FuseModule(nn.Module):
    def __init__(self, embed_dim):
        super().__init__()
        self.mlp1 = MLP(embed_dim * 2, embed_dim, embed_dim, use_residual=False)
        self.mlp2 = MLP(embed_dim, embed_dim, embed_dim, use_residual=True)
        self.layer_norm = nn.LayerNorm(embed_dim)

    def fuse_fn(self, prompt_embeds, id_embeds):
        stacked_id_embeds = torch.cat([prompt_embeds, id_embeds], dim=-1)
        stacked_id_embeds = self.mlp1(stacked_id_embeds) + prompt_embeds
        stacked_id_embeds = self.mlp2(stacked_id_embeds)
        stacked_id_embeds = self.layer_norm(stacked_id_embeds)
        return stacked_id_embeds

    def forward(
        self,
        prompt_embeds,
        id_embeds,
        class_tokens_mask,
    ) -> torch.Tensor:
        # id_embeds shape: [b, max_num_inputs, 1, 2048]
        id_embeds = id_embeds.to(prompt_embeds.dtype)
        num_inputs = class_tokens_mask.sum().unsqueeze(0)  # TODO: check for training case
        batch_size, max_num_inputs = id_embeds.shape[:2]
        # seq_length: 77
        seq_length = prompt_embeds.shape[1]
        # flat_id_embeds shape: [b*max_num_inputs, 1, 2048]
        flat_id_embeds = id_embeds.view(
            -1, id_embeds.shape[-2], id_embeds.shape[-1]
        )
        # valid_id_mask [b*max_num_inputs]
        valid_id_mask = (
            torch.arange(max_num_inputs, device=flat_id_embeds.device)[None, :]
            < num_inputs[:, None]
        )
        valid_id_embeds = flat_id_embeds[valid_id_mask.flatten()]

        prompt_embeds = prompt_embeds.view(-1, prompt_embeds.shape[-1])
        class_tokens_mask = class_tokens_mask.view(-1)
        valid_id_embeds = valid_id_embeds.view(-1, valid_id_embeds.shape[-1])
        # slice out the image token embeddings
        image_token_embeds = prompt_embeds[class_tokens_mask]
        stacked_id_embeds = self.fuse_fn(image_token_embeds, valid_id_embeds)
        assert class_tokens_mask.sum() == stacked_id_embeds.shape[0], f"{class_tokens_mask.sum()} != {stacked_id_embeds.shape[0]}"
        prompt_embeds.masked_scatter_(class_tokens_mask[:, None], stacked_id_embeds.to(prompt_embeds.dtype))
        updated_prompt_embeds = prompt_embeds.view(batch_size, seq_length, -1)
        return updated_prompt_embeds

class PhotoMakerIDEncoder(CLIPVisionModelWithProjection):
    def __init__(self):
        super().__init__(CLIPVisionConfig(**VISION_CONFIG_DICT))
        self.visual_projection_2 = nn.Linear(1024, 1280, bias=False)
        self.fuse_module = FuseModule(2048)

    def forward(self, id_pixel_values, prompt_embeds, class_tokens_mask):
        b, num_inputs, c, h, w = id_pixel_values.shape
        id_pixel_values = id_pixel_values.view(b * num_inputs, c, h, w)

        shared_id_embeds = self.vision_model(id_pixel_values)[1]
        id_embeds = self.visual_projection(shared_id_embeds)
        id_embeds_2 = self.visual_projection_2(shared_id_embeds)

        id_embeds = id_embeds.view(b, num_inputs, 1, -1)
        id_embeds_2 = id_embeds_2.view(b, num_inputs, 1, -1)

        id_embeds = torch.cat((id_embeds, id_embeds_2), dim=-1)
        updated_prompt_embeds = self.fuse_module(prompt_embeds, id_embeds, class_tokens_mask)

        return updated_prompt_embeds


if __name__ == "__main__":
    PhotoMakerIDEncoder()
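An illustrative shape check (not part of the commit) showing the tensors the ID encoder above expects; the batch size, number of reference images and mask positions below are arbitrary assumptions, chosen only so the call runs with randomly initialized weights.

import torch

encoder = PhotoMakerIDEncoder()                        # CLIP vision tower + fuse module, random weights here
b, num_inputs = 1, 2                                   # one prompt, two reference ID images
id_pixel_values = torch.randn(b, num_inputs, 3, 224, 224)
prompt_embeds = torch.randn(b, 77, 2048)               # concatenated SDXL text-encoder hidden states
class_tokens_mask = torch.zeros(b, 77, dtype=torch.bool)
class_tokens_mask[:, 5:5 + num_inputs] = True          # positions of the repeated class-word token
out = encoder(id_pixel_values, prompt_embeds, class_tokens_mask)
print(out.shape)                                       # torch.Size([1, 77, 2048])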
storyDiffusion/utils/pipeline.py
ADDED
@@ -0,0 +1,577 @@
from typing import Any, Callable, Dict, List, Optional, Union, Tuple
from collections import OrderedDict
import os
import PIL
import numpy as np

import torch
from torchvision import transforms as T

from safetensors import safe_open
from huggingface_hub.utils import validate_hf_hub_args
from transformers import CLIPImageProcessor, CLIPTokenizer
from diffusers import StableDiffusionXLPipeline
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
# `rescale_noise_cfg` is used below when `guidance_rescale > 0`
from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl import rescale_noise_cfg
from diffusers.utils import (
    _get_model_file,
    is_transformers_available,
    logging,
)

from . import PhotoMakerIDEncoder

PipelineImageInput = Union[
    PIL.Image.Image,
    torch.FloatTensor,
    List[PIL.Image.Image],
    List[torch.FloatTensor],
]


class PhotoMakerStableDiffusionXLPipeline(StableDiffusionXLPipeline):
    @validate_hf_hub_args
    def load_photomaker_adapter(
        self,
        pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
        weight_name: str,
        subfolder: str = '',
        trigger_word: str = 'img',
        **kwargs,
    ):
        """
        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                Can be either:

                    - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
                      the Hub.
                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).

            weight_name (`str`):
                The weight name, NOT the path to the weight.

            subfolder (`str`, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.

            trigger_word (`str`, *optional*, defaults to `"img"`):
                The trigger word is used to identify the position of the class word in the text prompt,
                and it is recommended not to set it as a common word.
                The trigger word must be placed after the class word when used, otherwise it will affect the performance of the personalized generation.
        """

        # Load the main state dict first.
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", None)
        token = kwargs.pop("token", None)
        revision = kwargs.pop("revision", None)

        user_agent = {
            "file_type": "attn_procs_weights",
            "framework": "pytorch",
        }

        if not isinstance(pretrained_model_name_or_path_or_dict, dict):
            model_file = _get_model_file(
                pretrained_model_name_or_path_or_dict,
                weights_name=weight_name,
                cache_dir=cache_dir,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                token=token,
                revision=revision,
                subfolder=subfolder,
                user_agent=user_agent,
            )
            if weight_name.endswith(".safetensors"):
                state_dict = {"id_encoder": {}, "lora_weights": {}}
                with safe_open(model_file, framework="pt", device="cpu") as f:
                    for key in f.keys():
                        if key.startswith("id_encoder."):
                            state_dict["id_encoder"][key.replace("id_encoder.", "")] = f.get_tensor(key)
                        elif key.startswith("lora_weights."):
                            state_dict["lora_weights"][key.replace("lora_weights.", "")] = f.get_tensor(key)
            else:
                state_dict = torch.load(model_file, map_location="cpu")
        else:
            state_dict = pretrained_model_name_or_path_or_dict

        keys = list(state_dict.keys())
        if keys != ["id_encoder", "lora_weights"]:
            raise ValueError("Required keys (`id_encoder` and `lora_weights`) are missing from the state dict.")

        self.trigger_word = trigger_word
        # load finetuned CLIP image encoder and fuse module here if it has not been registered to the pipeline yet
        print(f"Loading PhotoMaker components [1] id_encoder from [{pretrained_model_name_or_path_or_dict}]...")
        id_encoder = PhotoMakerIDEncoder()
        id_encoder.load_state_dict(state_dict["id_encoder"], strict=True)
        id_encoder = id_encoder.to(self.device, dtype=self.unet.dtype)
        self.id_encoder = id_encoder
        self.id_image_processor = CLIPImageProcessor()

        # load lora into models
        print(f"Loading PhotoMaker components [2] lora_weights from [{pretrained_model_name_or_path_or_dict}]")
        self.load_lora_weights(state_dict["lora_weights"], adapter_name="photomaker")

        # Add trigger word token
        if self.tokenizer is not None:
            self.tokenizer.add_tokens([self.trigger_word], special_tokens=True)

        self.tokenizer_2.add_tokens([self.trigger_word], special_tokens=True)


    def encode_prompt_with_trigger_word(
        self,
        prompt: str,
        prompt_2: Optional[str] = None,
        num_id_images: int = 1,
        device: Optional[torch.device] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        class_tokens_mask: Optional[torch.LongTensor] = None,
    ):
        device = device or self._execution_device

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # Find the token id of the trigger word
        image_token_id = self.tokenizer_2.convert_tokens_to_ids(self.trigger_word)

        # Define tokenizers and text encoders
        tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
        text_encoders = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )

        if prompt_embeds is None:
            prompt_2 = prompt_2 or prompt
            prompt_embeds_list = []
            prompts = [prompt, prompt_2]
            for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
                input_ids = tokenizer.encode(prompt)  # TODO: batch encode
                clean_index = 0
                clean_input_ids = []
                class_token_index = []
                # Find out the corresponding class word token based on the newly added trigger word token
                for i, token_id in enumerate(input_ids):
                    if token_id == image_token_id:
                        class_token_index.append(clean_index - 1)
                    else:
                        clean_input_ids.append(token_id)
                        clean_index += 1

                if len(class_token_index) != 1:
                    raise ValueError(
                        f"PhotoMaker currently does not support multiple trigger words in a single prompt.\
                            Trigger word: {self.trigger_word}, Prompt: {prompt}."
                    )
                class_token_index = class_token_index[0]

                # Expand the class word token and corresponding mask
                class_token = clean_input_ids[class_token_index]
                clean_input_ids = clean_input_ids[:class_token_index] + [class_token] * num_id_images + \
                    clean_input_ids[class_token_index+1:]

                # Truncation or padding
                max_len = tokenizer.model_max_length
                if len(clean_input_ids) > max_len:
                    clean_input_ids = clean_input_ids[:max_len]
                else:
                    clean_input_ids = clean_input_ids + [tokenizer.pad_token_id] * (
                        max_len - len(clean_input_ids)
                    )

                class_tokens_mask = [True if class_token_index <= i < class_token_index+num_id_images else False
                                     for i in range(len(clean_input_ids))]

                clean_input_ids = torch.tensor(clean_input_ids, dtype=torch.long).unsqueeze(0)
                class_tokens_mask = torch.tensor(class_tokens_mask, dtype=torch.bool).unsqueeze(0)

                prompt_embeds = text_encoder(
                    clean_input_ids.to(device),
                    output_hidden_states=True,
                )

                # We are only ALWAYS interested in the pooled output of the final text encoder
                pooled_prompt_embeds = prompt_embeds[0]
                prompt_embeds = prompt_embeds.hidden_states[-2]
                prompt_embeds_list.append(prompt_embeds)

            prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)

        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
        class_tokens_mask = class_tokens_mask.to(device=device)  # TODO: ignoring two-prompt case

        return prompt_embeds, pooled_prompt_embeds, class_tokens_mask

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        prompt_2: Optional[Union[str, List[str]]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        denoising_end: Optional[float] = None,
        guidance_scale: float = 5.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        negative_prompt_2: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guidance_rescale: float = 0.0,
        original_size: Optional[Tuple[int, int]] = None,
        crops_coords_top_left: Tuple[int, int] = (0, 0),
        target_size: Optional[Tuple[int, int]] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        # Added parameters (for PhotoMaker)
        input_id_images: PipelineImageInput = None,
        start_merge_step: int = 0,  # TODO: change to `style_strength_ratio` in the future
        class_tokens_mask: Optional[torch.LongTensor] = None,
        prompt_embeds_text_only: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds_text_only: Optional[torch.FloatTensor] = None,
    ):
        r"""
        Function invoked when calling the pipeline for generation.
        Only the parameters introduced by PhotoMaker are discussed here.
        For explanations of the previous parameters in StableDiffusionXLPipeline, please refer to https://github.com/huggingface/diffusers/blob/v0.25.0/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py

        Args:
            input_id_images (`PipelineImageInput`, *optional*):
                Input ID image(s) to work with PhotoMaker.
            class_tokens_mask (`torch.LongTensor`, *optional*):
                Pre-generated class token mask. When the `prompt_embeds` parameter is provided in advance, it is necessary to prepare the `class_tokens_mask` beforehand for marking out the position of the class word.
            prompt_embeds_text_only (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from the `prompt` input argument.
            pooled_prompt_embeds_text_only (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from the `prompt` input argument.

        Returns:
            [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
                [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
                `tuple`. When returning a tuple, the first element is a list with the generated images.
        """
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        original_size = original_size or (height, width)
        target_size = target_size or (height, width)

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            prompt_2,
            height,
            width,
            callback_steps,
            negative_prompt,
            negative_prompt_2,
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
            callback_on_step_end_tensor_inputs,
        )

        self._interrupt = False

        if prompt_embeds is not None and class_tokens_mask is None:
            raise ValueError(
                "If `prompt_embeds` are provided, `class_tokens_mask` also has to be passed. Make sure to generate `class_tokens_mask` from the same tokenizer that was used to generate `prompt_embeds`."
            )
        # check the input id images
        if input_id_images is None:
            raise ValueError(
                "Provide `input_id_images`. Cannot leave `input_id_images` undefined for the PhotoMaker pipeline."
            )
        if not isinstance(input_id_images, list):
            input_id_images = [input_id_images]

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
            prompt = [prompt]
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale >= 1.0

        assert do_classifier_free_guidance

        # 3. Encode input prompt
        num_id_images = len(input_id_images)
        if isinstance(prompt, list):
            prompt_arr = prompt
            negative_prompt_embeds_arr = []
            prompt_embeds_text_only_arr = []
            prompt_embeds_arr = []
            latents_arr = []
            add_time_ids_arr = []
            negative_pooled_prompt_embeds_arr = []
            pooled_prompt_embeds_text_only_arr = []
            pooled_prompt_embeds_arr = []
        for prompt in prompt_arr:
            (
                prompt_embeds,
                pooled_prompt_embeds,
                class_tokens_mask,
            ) = self.encode_prompt_with_trigger_word(
                prompt=prompt,
                prompt_2=prompt_2,
                device=device,
                num_id_images=num_id_images,
                prompt_embeds=prompt_embeds,
                pooled_prompt_embeds=pooled_prompt_embeds,
                class_tokens_mask=class_tokens_mask,
            )

            # 4. Encode input prompt without the trigger word for delayed conditioning
            # encode, remove trigger word token, then decode
            tokens_text_only = self.tokenizer.encode(prompt, add_special_tokens=False)
            trigger_word_token = self.tokenizer.convert_tokens_to_ids(self.trigger_word)
            tokens_text_only.remove(trigger_word_token)
            prompt_text_only = self.tokenizer.decode(tokens_text_only, add_special_tokens=False)
            print(prompt_text_only)
            (
                prompt_embeds_text_only,
                negative_prompt_embeds,
                pooled_prompt_embeds_text_only,  # TODO: replace the pooled_prompt_embeds with text only prompt
                negative_pooled_prompt_embeds,
            ) = self.encode_prompt(
                prompt=prompt_text_only,
                prompt_2=prompt_2,
                device=device,
                num_images_per_prompt=num_images_per_prompt,
                do_classifier_free_guidance=True,
                negative_prompt=negative_prompt,
                negative_prompt_2=negative_prompt_2,
                prompt_embeds=prompt_embeds_text_only,
                negative_prompt_embeds=negative_prompt_embeds,
                pooled_prompt_embeds=pooled_prompt_embeds_text_only,
                negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
            )

            # 5. Prepare the input ID images
            dtype = next(self.id_encoder.parameters()).dtype
            if not isinstance(input_id_images[0], torch.Tensor):
                id_pixel_values = self.id_image_processor(input_id_images, return_tensors="pt").pixel_values

            id_pixel_values = id_pixel_values.unsqueeze(0).to(device=device, dtype=dtype)  # TODO: multiple prompts

            # 6. Get the updated text embedding with the stacked ID embedding
            prompt_embeds = self.id_encoder(id_pixel_values, prompt_embeds, class_tokens_mask)

            bs_embed, seq_len, _ = prompt_embeds.shape
            # duplicate text embeddings for each generation per prompt, using mps friendly method
            prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
            prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
            pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
                bs_embed * num_images_per_prompt, -1
            )

            negative_prompt_embeds_arr.append(negative_prompt_embeds)
            negative_prompt_embeds = None
            negative_pooled_prompt_embeds_arr.append(negative_pooled_prompt_embeds)
            negative_pooled_prompt_embeds = None
            prompt_embeds_text_only_arr.append(prompt_embeds_text_only)
            prompt_embeds_text_only = None
            prompt_embeds_arr.append(prompt_embeds)
            prompt_embeds = None
            pooled_prompt_embeds_arr.append(pooled_prompt_embeds)
            pooled_prompt_embeds = None
            pooled_prompt_embeds_text_only_arr.append(pooled_prompt_embeds_text_only)
            pooled_prompt_embeds_text_only = None
        # 7. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        negative_prompt_embeds = torch.cat(negative_prompt_embeds_arr, dim=0)
        print(negative_prompt_embeds.shape)
        prompt_embeds = torch.cat(prompt_embeds_arr, dim=0)
        print(prompt_embeds.shape)

        prompt_embeds_text_only = torch.cat(prompt_embeds_text_only_arr, dim=0)
        print(prompt_embeds_text_only.shape)
        pooled_prompt_embeds_text_only = torch.cat(pooled_prompt_embeds_text_only_arr, dim=0)
        print(pooled_prompt_embeds_text_only.shape)

        negative_pooled_prompt_embeds = torch.cat(negative_pooled_prompt_embeds_arr, dim=0)
        print(negative_pooled_prompt_embeds.shape)
        pooled_prompt_embeds = torch.cat(pooled_prompt_embeds_arr, dim=0)
        print(pooled_prompt_embeds.shape)
        # 8. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 10. Prepare added time ids & embeddings
        if self.text_encoder_2 is None:
            text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
        else:
            text_encoder_projection_dim = self.text_encoder_2.config.projection_dim

        add_time_ids = self._get_add_time_ids(
            original_size,
            crops_coords_top_left,
            target_size,
            dtype=prompt_embeds.dtype,
            text_encoder_projection_dim=text_encoder_projection_dim,
        )
        add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
        add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)

        print(latents.shape)
        print(add_time_ids.shape)

        # 11. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                latent_model_input = (
                    torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                )
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                if i <= start_merge_step:
                    current_prompt_embeds = torch.cat(
                        [negative_prompt_embeds, prompt_embeds_text_only], dim=0
                    )
                    add_text_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds_text_only], dim=0)
                else:
                    current_prompt_embeds = torch.cat(
                        [negative_prompt_embeds, prompt_embeds], dim=0
                    )
                    add_text_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)
                # predict the noise residual
                added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
                # print(latent_model_input.shape)
                # print(t)
                # print(current_prompt_embeds.shape)
                # print(add_text_embeds.shape)
                # print(add_time_ids.shape)
                #zeros_matrix =
                #global_mask1024 = torch.cat([torch.randn(1, 1024, 1, 1, device=device) for random_number])
                #global_mask4096 =
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=current_prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    added_cond_kwargs=added_cond_kwargs,
                    return_dict=False,
                )[0]
                # print(noise_pred.shape)
                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                if do_classifier_free_guidance and guidance_rescale > 0.0:
                    # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
                    noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]

                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
                    add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
                    # negative_pooled_prompt_embeds = callback_outputs.pop(
                    #     "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
                    # )
                    # add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
                    # negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        # make sure the VAE is in float32 mode, as it overflows in float16
        if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
            self.upcast_vae()
            latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)

        if not output_type == "latent":
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
        else:
            image = latents
            return StableDiffusionXLPipelineOutput(images=image)

        # apply watermark if available
        # if self.watermark is not None:
        #     image = self.watermark.apply_watermark(image)

        image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return StableDiffusionXLPipelineOutput(images=image)
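A hedged usage sketch (not part of the commit) of how this pipeline is typically driven; the Hub repo id, weight file name and example image path below are assumptions for illustration only.

import torch
from PIL import Image

pipe = PhotoMakerStableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipe.load_photomaker_adapter(
    "TencentARC/PhotoMaker",           # assumed Hub repo holding the adapter weights
    weight_name="photomaker-v1.bin",   # assumed weight file name
    trigger_word="img",
)
images = pipe(
    prompt="a man img, wearing a black suit, at home",   # trigger word placed right after the class word
    input_id_images=[Image.open("./examples/lecun/yann-lecun2.jpeg")],
    num_inference_steps=30,
    start_merge_step=10,
).images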
storyDiffusion/utils/style_template.py
ADDED
@@ -0,0 +1,39 @@
style_list = [
    {
        "name": "(No style)",
        "prompt": "{prompt}",
        "negative_prompt": "",
    },
    {
        "name": "Japanese Anime",
        "prompt": "anime artwork illustrating {prompt}. created by japanese anime studio. highly emotional. best quality, high resolution",
        "negative_prompt": "low quality, low resolution"
    },
    {
        "name": "Cinematic",
        "prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
        "negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
    },
    {
        "name": "Disney Charactor",
        "prompt": "A Pixar animation character of {prompt} . pixar-style, studio anime, Disney, high-quality",
        "negative_prompt": "lowres, bad anatomy, bad hands, text, bad eyes, bad arms, bad legs, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, blurry, grayscale, noisy, sloppy, messy, grainy, highly detailed, ultra textured, photo",
    },
    {
        "name": "Photographic",
        "prompt": "cinematic photo {prompt} . 35mm photograph, film, bokeh, professional, 4k, highly detailed",
        "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
    },
    {
        "name": "Comic book",
        "prompt": "comic {prompt} . graphic illustration, comic art, graphic novel art, vibrant, highly detailed",
        "negative_prompt": "photograph, deformed, glitch, noisy, realistic, stock photo",
    },
    {
        "name": "Line art",
        "prompt": "line art drawing {prompt} . professional, sleek, modern, minimalist, graphic, line art, vector graphics",
        "negative_prompt": "anime, photorealistic, 35mm film, deformed, glitch, blurry, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, mutated, realism, realistic, impressionism, expressionism, oil, acrylic",
    }
]

styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
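An illustrative helper (not part of the commit) showing one way the templates above can be applied to a user prompt; the helper name and the appended-negative convention are assumptions for demonstration.

def apply_style(style_name, positive, negative=""):
    # fall back to the plain template when the style name is unknown
    prompt_tmpl, negative_tmpl = styles.get(style_name, styles["(No style)"])
    return prompt_tmpl.replace("{prompt}", positive), (negative_tmpl + " " + negative).strip()

p, n = apply_style("Comic book", "a man reading a newspaper at home", "lowres")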
storyDiffusion/utils/utils.py
ADDED
@@ -0,0 +1,421 @@
from email.mime import image
import torch
import base64
import gradio as gr
import numpy as np
from PIL import Image,ImageOps,ImageDraw, ImageFont
from io import BytesIO
import random
MAX_COLORS = 12
def get_random_bool():
    return random.choice([True, False])

def add_white_border(input_image, border_width=10):
    """
    Add a white border of the given width to a PIL image.

    :param input_image: PIL image object
    :param border_width: border width in pixels
    :return: PIL image object with a white border
    """
    border_color = 'white'  # white border
    # add the border
    img_with_border = ImageOps.expand(input_image, border=border_width, fill=border_color)
    return img_with_border

def process_mulline_text(draw, text, font, max_width):
    """
    Draw the text on an image with word wrapping.
    """
    lines = []  # Store the lines of text here
    words = text.split()

    # Start building lines of text, and wrap when necessary
    current_line = ""
    for word in words:
        test_line = f"{current_line} {word}".strip()
        # Check the width of the line with this word added
        width, _ = draw.textsize(test_line, font=font)
        if width <= max_width:
            # If it fits, add this word to the current line
            current_line = test_line
        else:
            # If not, store the line and start a new one
            lines.append(current_line)
            current_line = word
    # Add the last line
    lines.append(current_line)
    return lines



def add_caption(image, text, position = "bottom-mid", font = None, text_color= 'black', bg_color = (255, 255, 255) , bg_opacity = 200):
    if text == "":
        return image
    image = image.convert("RGBA")
    draw = ImageDraw.Draw(image)
    width, height = image.size
    lines = process_mulline_text(draw,text,font,width)
    text_positions = []
    maxwidth = 0
    for ind, line in enumerate(lines[::-1]):
        text_width, text_height = draw.textsize(line, font=font)
        if position == 'bottom-right':
            text_position = (width - text_width - 10, height - (text_height + 20))
        elif position == 'bottom-left':
            text_position = (10, height - (text_height + 20))
        elif position == 'bottom-mid':
            text_position = ((width - text_width) // 2, height - (text_height + 20) )  # center the text
        height = text_position[1]
        maxwidth = max(maxwidth,text_width)
        text_positions.append(text_position)
    rectpos = (width - maxwidth) // 2
    rectangle_position = [rectpos - 5, text_positions[-1][1] - 5, rectpos + maxwidth + 5, text_positions[0][1] + text_height + 5]
    image_with_transparency = Image.new('RGBA', image.size)
    draw_with_transparency = ImageDraw.Draw(image_with_transparency)
    draw_with_transparency.rectangle(rectangle_position, fill=bg_color + (bg_opacity,))

    image.paste(Image.alpha_composite(image.convert('RGBA'), image_with_transparency))
    print(ind,text_position)
    draw = ImageDraw.Draw(image)
    for ind, line in enumerate(lines[::-1]):
        text_position = text_positions[ind]
        draw.text(text_position, line, fill=text_color, font=font)

    return image.convert('RGB')

def get_comic(images,types = "4panel",captions = [],font = None,pad_image = None):
    if pad_image == None:
        pad_image = Image.open("./images/pad_images.png")
    if font == None:
        font = ImageFont.truetype("./fonts/Inkfree.ttf", int(30 * images[0].size[1] / 1024))
    if types == "No typesetting (default)":
        return images
    elif types == "Four Pannel":
        return get_comic_4panel(images,captions,font,pad_image)
    else:  # "Classic Comic Style"
        return get_comic_classical(images,captions,font,pad_image)

def get_caption_group(images_groups,captions = []):
    caption_groups = []
    for i in range(len(images_groups)):
        length = len(images_groups[i])
        caption_groups.append(captions[:length])
        captions = captions[length:]
    if len(caption_groups[-1]) < len(images_groups[-1]):
        caption_groups[-1] = caption_groups[-1] + [""] * (len(images_groups[-1]) - len(caption_groups[-1]))
    return caption_groups

def get_comic_classical(images,captions = None,font = None,pad_image = None):
    if pad_image == None:
        raise ValueError("pad_image is None")
    images = [add_white_border(image) for image in images]
    pad_image = pad_image.resize(images[0].size, Image.ANTIALIAS)
    images_groups = distribute_images2(images,pad_image)
    print(images_groups)
    if captions != None:
        captions_groups = get_caption_group(images_groups,captions)
    # print(images_groups)
    row_images = []
    for ind, img_group in enumerate(images_groups):
        row_images.append(get_row_image2(img_group ,captions= captions_groups[ind] if captions != None else None,font = font))

    return [combine_images_vertically_with_resize(row_images)]



def get_comic_4panel(images,captions = [],font = None,pad_image = None):
    if pad_image == None:
        raise ValueError("pad_image is None")
    pad_image = pad_image.resize(images[0].size, Image.ANTIALIAS)
    images = [add_white_border(image) for image in images]
    assert len(captions) == len(images)
    for i,caption in enumerate(captions):
        images[i] = add_caption(images[i],caption,font = font)
    images_nums = len(images)
    pad_nums = int((4 - images_nums % 4) % 4)
    images = images + [pad_image for _ in range(pad_nums)]
    comics = []
    assert len(images)%4 == 0
    for i in range(len(images)//4):
        comics.append(combine_images_vertically_with_resize([combine_images_horizontally(images[i*4:i*4+2]), combine_images_horizontally(images[i*4+2:i*4+4])]))

    return comics

def get_row_image(images):
    row_image_arr = []
    if len(images)>3:
        stack_img_nums = (len(images) - 2)//2
    else:
        stack_img_nums = 0
    while(len(images)>0):
        if stack_img_nums <=0:
            row_image_arr.append(images[0])
            images = images[1:]
        elif len(images)>stack_img_nums*2:
            if get_random_bool():
                row_image_arr.append(concat_images_vertically_and_scale(images[:2]))
                images = images[2:]
                stack_img_nums -=1
            else:
                row_image_arr.append(images[0])
                images = images[1:]
        else:
            row_image_arr.append(concat_images_vertically_and_scale(images[:2]))
            images = images[2:]
            stack_img_nums-=1
    return combine_images_horizontally(row_image_arr)

def get_row_image2(images,captions = None, font = None):
    row_image_arr = []
    if len(images)== 6:
        sequence_list = [1,1,2,2]
    elif len(images)== 4:
        sequence_list = [1,1,2]
    else:
        raise ValueError("images nums is not 4 or 6 found",len(images))
    random.shuffle(sequence_list)
    index = 0
    for length in sequence_list:
        if length == 1:
            if captions != None:
                images_tmp = add_caption(images[0],text = captions[index],font= font)
            else:
                images_tmp = images[0]
            row_image_arr.append( images_tmp)
            images = images[1:]
            index +=1
        elif length == 2:
            row_image_arr.append(concat_images_vertically_and_scale(images[:2]))
            images = images[2:]
            index +=2

    return combine_images_horizontally(row_image_arr)



def concat_images_vertically_and_scale(images,scale_factor=2):
    # load all images
    # make sure all images share the same width
    widths = [img.width for img in images]
    if not all(width == widths[0] for width in widths):
        raise ValueError('All images must have the same width.')

    # total height
    total_height = sum(img.height for img in images)

    # create a new image with the same width and the combined height
    max_width = max(widths)
    concatenated_image = Image.new('RGB', (max_width, total_height))

    # stack the images vertically
    current_height = 0
    for img in images:
        concatenated_image.paste(img, (0, current_height))
        current_height += img.height

    # scale the result down by `scale_factor`
    new_height = concatenated_image.height // scale_factor
    new_width = concatenated_image.width // scale_factor
    resized_image = concatenated_image.resize((new_width, new_height), Image.ANTIALIAS)

    return resized_image


def combine_images_horizontally(images):
    # read all images into a list

    # width and height of each image
    widths, heights = zip(*(i.size for i in images))

    # total width and maximum height
    total_width = sum(widths)
    max_height = max(heights)

    # blank canvas for the concatenation
    new_im = Image.new('RGB', (total_width, max_height))

    # paste the images side by side
    x_offset = 0
    for im in images:
        new_im.paste(im, (x_offset, 0))
        x_offset += im.width

    return new_im

def combine_images_vertically_with_resize(images):

    # width and height of each image
    widths, heights = zip(*(i.size for i in images))

    # the new image width is the smallest width among the inputs
    min_width = min(widths)

    # resize images to a common width while keeping the aspect ratio
    resized_images = []
    for img in images:
        # new height that preserves the aspect ratio
        new_height = int(min_width * img.height / img.width)
        # resize the image
        resized_img = img.resize((min_width, new_height), Image.ANTIALIAS)
        resized_images.append(resized_img)

    # total height after resizing
    total_height = sum(img.height for img in resized_images)

    # create a canvas wide and tall enough
    new_im = Image.new('RGB', (min_width, total_height))

    # stack the images vertically
    y_offset = 0
    for im in resized_images:
        new_im.paste(im, (0, y_offset))
        y_offset += im.height

    return new_im

def distribute_images2(images, pad_image):
    groups = []
    remaining = len(images)
    if len(images) <= 8:
        group_sizes = [4]
    else:
        group_sizes = [4, 6]

    size_index = 0
    while remaining > 0:
        size = group_sizes[size_index%len(group_sizes)]
        if remaining < size and remaining < min(group_sizes):
            size = min(group_sizes)
        if remaining > size:
            new_group = images[-remaining: -remaining + size]
        else:
            new_group = images[-remaining:]
        groups.append(new_group)
        size_index += 1
        remaining -= size
        print(remaining,groups)
    groups[-1] = groups[-1] + [pad_image for _ in range(-remaining)]

    return groups


def distribute_images(images, group_sizes=(4, 3, 2)):
    groups = []
    remaining = len(images)

    while remaining > 0:
        # prefer the largest group (4 images), then 3, and finally 2
        for size in sorted(group_sizes, reverse=True):
            # start a new group if enough images remain, or on the first iteration
            # (remaining == total number of images)
            if remaining >= size or remaining == len(images):
                if remaining > size:
                    new_group = images[-remaining: -remaining + size]
                else:
                    new_group = images[-remaining:]
                groups.append(new_group)
                remaining -= size
                break
            # if fewer images than the smallest group size (2) remain and at least one
            # group exists, append the leftovers to the last group
            elif remaining < min(group_sizes) and groups:
                groups[-1].extend(images[-remaining:])
                remaining = 0

    return groups

def create_binary_matrix(img_arr, target_color):
    mask = np.all(img_arr == target_color, axis=-1)
    binary_matrix = mask.astype(int)
    return binary_matrix

def preprocess_mask(mask_, h, w, device):
    mask = np.array(mask_)
    mask = mask.astype(np.float32)
    mask = mask[None, None]
    mask[mask < 0.5] = 0
    mask[mask >= 0.5] = 1
    mask = torch.from_numpy(mask).to(device)
    mask = torch.nn.functional.interpolate(mask, size=(h, w), mode='nearest')
    return mask

def process_sketch(canvas_data):
    binary_matrixes = []
    base64_img = canvas_data['image']
    image_data = base64.b64decode(base64_img.split(',')[1])
    image = Image.open(BytesIO(image_data)).convert("RGB")
    im2arr = np.array(image)
    colors = [tuple(map(int, rgb[4:-1].split(','))) for rgb in canvas_data['colors']]
    colors_fixed = []

    r, g, b = 255, 255, 255
    binary_matrix = create_binary_matrix(im2arr, (r,g,b))
    binary_matrixes.append(binary_matrix)
    binary_matrix_ = np.repeat(np.expand_dims(binary_matrix, axis=(-1)), 3, axis=(-1))
    colored_map = binary_matrix_*(r,g,b) + (1-binary_matrix_)*(50,50,50)
    colors_fixed.append(gr.update(value=colored_map.astype(np.uint8)))

    for color in colors:
        r, g, b = color
        if any(c != 255 for c in (r, g, b)):
            binary_matrix = create_binary_matrix(im2arr, (r,g,b))
            binary_matrixes.append(binary_matrix)
            binary_matrix_ = np.repeat(np.expand_dims(binary_matrix, axis=(-1)), 3, axis=(-1))
            colored_map = binary_matrix_*(r,g,b) + (1-binary_matrix_)*(50,50,50)
            colors_fixed.append(gr.update(value=colored_map.astype(np.uint8)))

    visibilities = []
    colors = []
    for n in range(MAX_COLORS):
        visibilities.append(gr.update(visible=False))
        colors.append(gr.update())
    for n in range(len(colors_fixed)):
        visibilities[n] = gr.update(visible=True)
        colors[n] = colors_fixed[n]

    return [gr.update(visible=True), binary_matrixes, *visibilities, *colors]

def process_prompts(binary_matrixes, *seg_prompts):
    return [gr.update(visible=True), gr.update(value=' , '.join(seg_prompts[:len(binary_matrixes)]))]

def process_example(layout_path, all_prompts, seed_):

    all_prompts = all_prompts.split('***')

    binary_matrixes = []
    colors_fixed = []

    im2arr = np.array(Image.open(layout_path))[:,:,:3]
    unique, counts = np.unique(np.reshape(im2arr,(-1,3)), axis=0, return_counts=True)
    sorted_idx = np.argsort(-counts)

    binary_matrix = create_binary_matrix(im2arr, (0,0,0))
    binary_matrixes.append(binary_matrix)
    binary_matrix_ = np.repeat(np.expand_dims(binary_matrix, axis=(-1)), 3, axis=(-1))
    colored_map = binary_matrix_*(255,255,255) + (1-binary_matrix_)*(50,50,50)
    colors_fixed.append(gr.update(value=colored_map.astype(np.uint8)))

    for i in range(len(all_prompts)-1):
        r, g, b = unique[sorted_idx[i]]
        if any(c != 255 for c in (r, g, b)) and any(c != 0 for c in (r, g, b)):
            binary_matrix = create_binary_matrix(im2arr, (r,g,b))
            binary_matrixes.append(binary_matrix)
            binary_matrix_ = np.repeat(np.expand_dims(binary_matrix, axis=(-1)), 3, axis=(-1))
            colored_map = binary_matrix_*(r,g,b) + (1-binary_matrix_)*(50,50,50)
            colors_fixed.append(gr.update(value=colored_map.astype(np.uint8)))

    visibilities = []
    colors = []
    prompts = []
    for n in range(MAX_COLORS):
        visibilities.append(gr.update(visible=False))
        colors.append(gr.update())
        prompts.append(gr.update())

    for n in range(len(colors_fixed)):
        visibilities[n] = gr.update(visible=True)
        colors[n] = colors_fixed[n]
        prompts[n] = all_prompts[n+1]

    return [gr.update(visible=True), binary_matrixes, *visibilities, *colors, *prompts,
            gr.update(visible=True), gr.update(value=all_prompts[0]), int(seed_)]
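A quick illustration (not part of the commit) of how the grouping helper above splits a panel list; `distribute_images` works on any list, so integers stand in for images here.

panels = list(range(9))
print(distribute_images(panels))
# -> [[0, 1, 2, 3], [4, 5, 6, 7, 8]]  (the single leftover panel is merged into the last group)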