ACCC1380 committed on
Commit a82af9a
1 Parent(s): b1d9e54

Upload lora-scripts/sd-scripts/library/sai_model_spec.py with huggingface_hub

lora-scripts/sd-scripts/library/sai_model_spec.py ADDED
@@ -0,0 +1,309 @@
# based on https://github.com/Stability-AI/ModelSpec
import datetime
import hashlib
from io import BytesIO
import os
from typing import List, Optional, Tuple, Union

import safetensors
import safetensors.torch  # the torch submodule is not loaded by "import safetensors" alone

from library.utils import setup_logging

setup_logging()
import logging

logger = logging.getLogger(__name__)

r"""
# Metadata Example
metadata = {
    # === Must ===
    "modelspec.sai_model_spec": "1.0.0",  # Required version ID for the spec
    "modelspec.architecture": "stable-diffusion-xl-v1-base",  # Architecture; use the ID of the arch's original model so the IDs match
    "modelspec.implementation": "sgm",
    "modelspec.title": "Example Model Version 1.0",  # Clean, human-readable title. May use your own phrasing/language/etc
    # === Should ===
    "modelspec.author": "Example Corp",  # Your name or company name
    "modelspec.description": "This is my example model to show you how to do it!",  # Describe the model in your own words/language. Focus on what users need to know
    "modelspec.date": "2023-07-20",  # ISO-8601 compliant date of when the model was created
    # === Can ===
    "modelspec.license": "ExampleLicense-1.0",  # e.g. CreativeML Open RAIL, etc.
    "modelspec.usage_hint": "Use keyword 'example'",  # In your own language, very short hints about how the user should use the model
}
"""

BASE_METADATA = {
    # === Must ===
    "modelspec.sai_model_spec": "1.0.0",  # Required version ID for the spec
    "modelspec.architecture": None,
    "modelspec.implementation": None,
    "modelspec.title": None,
    "modelspec.resolution": None,
    # === Should ===
    "modelspec.description": None,
    "modelspec.author": None,
    "modelspec.date": None,
    # === Can ===
    "modelspec.license": None,
    "modelspec.tags": None,
    "modelspec.merged_from": None,
    "modelspec.prediction_type": None,
    "modelspec.timestep_range": None,
    "modelspec.encoder_layer": None,
}

# define constants only for the keys that are accessed individually
MODELSPEC_TITLE = "modelspec.title"

ARCH_SD_V1 = "stable-diffusion-v1"
ARCH_SD_V2_512 = "stable-diffusion-v2-512"
ARCH_SD_V2_768_V = "stable-diffusion-v2-768-v"
ARCH_SD_XL_V1_BASE = "stable-diffusion-xl-v1-base"

ADAPTER_LORA = "lora"
ADAPTER_TEXTUAL_INVERSION = "textual-inversion"

IMPL_STABILITY_AI = "https://github.com/Stability-AI/generative-models"
IMPL_DIFFUSERS = "diffusers"

PRED_TYPE_EPSILON = "epsilon"
PRED_TYPE_V = "v"


def load_bytes_in_safetensors(tensors):
    # serialize to the safetensors format in memory, then return only the
    # packed tensor data: the format is an 8-byte little-endian header length
    # n, an n-byte JSON header, and the tensor bytes after that
    saved = safetensors.torch.save(tensors)
    b = BytesIO(saved)

    b.seek(0)
    header = b.read(8)
    n = int.from_bytes(header, "little")

    offset = n + 8
    b.seek(offset)

    return b.read()
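
r"""
# Illustrative check (not part of the original module): the returned bytes are
# exactly the serialized blob minus its 8-byte length prefix and JSON header.
import torch

sd = {"tensor": torch.zeros(2, 2)}
full = safetensors.torch.save(sd)
n = int.from_bytes(full[:8], "little")
assert load_bytes_in_safetensors(sd) == full[8 + n :]
"""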


def precalculate_safetensors_hashes(state_dict):
    # calculate each tensor one by one to reduce memory usage
    hash_sha256 = hashlib.sha256()
    for tensor in state_dict.values():
        single_tensor_sd = {"tensor": tensor}
        bytes_for_tensor = load_bytes_in_safetensors(single_tensor_sd)
        hash_sha256.update(bytes_for_tensor)

    return f"0x{hash_sha256.hexdigest()}"


def update_hash_sha256(metadata: dict, state_dict: dict):
    raise NotImplementedError


def build_metadata(
    state_dict: Optional[dict],
    v2: bool,
    v_parameterization: bool,
    sdxl: bool,
    lora: bool,
    textual_inversion: bool,
    timestamp: float,
    title: Optional[str] = None,
    reso: Optional[Union[int, Tuple[int, int]]] = None,
    is_stable_diffusion_ckpt: Optional[bool] = None,
    author: Optional[str] = None,
    description: Optional[str] = None,
    license: Optional[str] = None,
    tags: Optional[str] = None,
    merged_from: Optional[str] = None,
    timesteps: Optional[Tuple[int, int]] = None,
    clip_skip: Optional[int] = None,
):
    # if state_dict is None, the hash is not calculated

    metadata = {}
    metadata.update(BASE_METADATA)

    # TODO: implement this once we know how to compute the hash correctly without using extra memory
    # if state_dict is not None:
    #     hash = precalculate_safetensors_hashes(state_dict)
    #     metadata["modelspec.hash_sha256"] = hash

    if sdxl:
        arch = ARCH_SD_XL_V1_BASE
    elif v2:
        if v_parameterization:
            arch = ARCH_SD_V2_768_V
        else:
            arch = ARCH_SD_V2_512
    else:
        arch = ARCH_SD_V1

    if lora:
        arch += f"/{ADAPTER_LORA}"
    elif textual_inversion:
        arch += f"/{ADAPTER_TEXTUAL_INVERSION}"

    metadata["modelspec.architecture"] = arch

    if not lora and not textual_inversion and is_stable_diffusion_ckpt is None:
        is_stable_diffusion_ckpt = True  # default is stable diffusion ckpt if not lora and not textual_inversion

    if (lora and sdxl) or textual_inversion or is_stable_diffusion_ckpt:
        # Stable Diffusion ckpt, TI, SDXL LoRA
        impl = IMPL_STABILITY_AI
    else:
        # v1/v2 LoRA or Diffusers
        impl = IMPL_DIFFUSERS
    metadata["modelspec.implementation"] = impl

    if title is None:
        if lora:
            title = "LoRA"
        elif textual_inversion:
            title = "TextualInversion"
        else:
            title = "Checkpoint"
        title += f"@{timestamp}"
    metadata[MODELSPEC_TITLE] = title

    if author is not None:
        metadata["modelspec.author"] = author
    else:
        del metadata["modelspec.author"]

    if description is not None:
        metadata["modelspec.description"] = description
    else:
        del metadata["modelspec.description"]

    if merged_from is not None:
        metadata["modelspec.merged_from"] = merged_from
    else:
        del metadata["modelspec.merged_from"]

    if license is not None:
        metadata["modelspec.license"] = license
    else:
        del metadata["modelspec.license"]

    if tags is not None:
        metadata["modelspec.tags"] = tags
    else:
        del metadata["modelspec.tags"]

    # remove microseconds from the timestamp
    int_ts = int(timestamp)

    # convert the time to an ISO-8601 compliant date
    date = datetime.datetime.fromtimestamp(int_ts).isoformat()
    metadata["modelspec.date"] = date

    if reso is not None:
        # comma-separated string to tuple
        if isinstance(reso, str):
            reso = tuple(map(int, reso.split(",")))
        if len(reso) == 1:
            reso = (reso[0], reso[0])
    else:
        # resolution is defined by the dataset, so use the architecture default
        if sdxl:
            reso = 1024
        elif v2 and v_parameterization:
            reso = 768
        else:
            reso = 512
    if isinstance(reso, int):
        reso = (reso, reso)

    metadata["modelspec.resolution"] = f"{reso[0]}x{reso[1]}"

    if v_parameterization:
        metadata["modelspec.prediction_type"] = PRED_TYPE_V
    else:
        metadata["modelspec.prediction_type"] = PRED_TYPE_EPSILON

    if timesteps is not None:
        if isinstance(timesteps, str) or isinstance(timesteps, int):
            timesteps = (timesteps, timesteps)
        if len(timesteps) == 1:
            timesteps = (timesteps[0], timesteps[0])
        metadata["modelspec.timestep_range"] = f"{timesteps[0]},{timesteps[1]}"
    else:
        del metadata["modelspec.timestep_range"]

    if clip_skip is not None:
        metadata["modelspec.encoder_layer"] = f"{clip_skip}"
    else:
        del metadata["modelspec.encoder_layer"]

    # all remaining values should be filled at this point
    if not all([v is not None for v in metadata.values()]):
        logger.error(f"Internal error: some metadata values are None: {metadata}")

    return metadata
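

r"""
# Usage sketch (illustrative, not part of the original module): metadata for an
# SDXL LoRA trained at 1024x1024. All argument values are placeholders.
import time

metadata = build_metadata(
    None,  # state_dict is unused while hash calculation is unimplemented
    False,  # v2
    False,  # v_parameterization
    True,  # sdxl
    True,  # lora
    False,  # textual_inversion
    time.time(),
    title="my-sdxl-lora",
    reso=1024,
)
# metadata["modelspec.architecture"] == "stable-diffusion-xl-v1-base/lora"
"""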


# region utils


def get_title(metadata: dict) -> Optional[str]:
    return metadata.get(MODELSPEC_TITLE, None)


def load_metadata_from_safetensors(model: str) -> dict:
    if not model.endswith(".safetensors"):
        return {}

    with safetensors.safe_open(model, framework="pt") as f:
        metadata = f.metadata()
    if metadata is None:
        metadata = {}
    return metadata
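
r"""
# Usage sketch (illustrative; the file name is a placeholder):
metadata = load_metadata_from_safetensors("my_model.safetensors")
print(get_title(metadata))  # None if the file carries no modelspec.title
"""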


def build_merged_from(models: List[str]) -> str:
    def get_title(model: str):
        metadata = load_metadata_from_safetensors(model)
        title = metadata.get(MODELSPEC_TITLE, None)
        if title is None:
            title = os.path.splitext(os.path.basename(model))[0]  # use the filename
        return title

    titles = [get_title(model) for model in models]
    return ", ".join(titles)


# endregion


r"""
if __name__ == "__main__":
    import argparse
    import json
    import struct
    import time

    import torch
    from safetensors.torch import load_file

    parser = argparse.ArgumentParser()
    parser.add_argument("--ckpt", type=str, required=True)
    args = parser.parse_args()

    print(f"Loading {args.ckpt}")
    state_dict = load_file(args.ckpt)

    print("Calculating metadata")
    # argument values here are examples for a plain SD v1 checkpoint
    metadata = build_metadata(state_dict, False, False, False, False, False, time.time(), title="title", reso=256)
    print(metadata)
    del state_dict

    # by reference implementation
    # (the comparison below assumes modelspec.hash_sha256 is populated, which
    # requires the hash-calculation TODO in build_metadata to be implemented)
    with open(args.ckpt, mode="rb") as file_data:
        file_hash = hashlib.sha256()
        head_len = struct.unpack("<Q", file_data.read(8))  # little-endian int64 header length prefix
        header = json.loads(file_data.read(head_len[0]))  # the header itself, a JSON string
        content = (
            file_data.read()
        )  # All other content is tightly packed tensors. Copy to RAM for simplicity, but you can avoid this read with a more careful FS-dependent impl.
        file_hash.update(content)
        # ===== Update the hash for modelspec =====
        by_ref = f"0x{file_hash.hexdigest()}"
        print(by_ref)
        print("is same?", by_ref == metadata["modelspec.hash_sha256"])
"""