miaw1419 committed on
Commit
0aaa1f1
1 Parent(s): cb7b6f3

Upload 472 files

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. diffusers/__init__.py +760 -0
  2. diffusers/__pycache__/__init__.cpython-38.pyc +0 -0
  3. diffusers/__pycache__/configuration_utils.cpython-38.pyc +0 -0
  4. diffusers/__pycache__/dependency_versions_check.cpython-38.pyc +0 -0
  5. diffusers/__pycache__/dependency_versions_table.cpython-38.pyc +0 -0
  6. diffusers/__pycache__/image_processor.cpython-38.pyc +0 -0
  7. diffusers/commands/__init__.py +27 -0
  8. diffusers/commands/diffusers_cli.py +43 -0
  9. diffusers/commands/env.py +84 -0
  10. diffusers/commands/fp16_safetensors.py +132 -0
  11. diffusers/configuration_utils.py +699 -0
  12. diffusers/dependency_versions_check.py +34 -0
  13. diffusers/dependency_versions_table.py +45 -0
  14. diffusers/experimental/README.md +5 -0
  15. diffusers/experimental/__init__.py +1 -0
  16. diffusers/experimental/rl/__init__.py +1 -0
  17. diffusers/experimental/rl/value_guided_sampling.py +153 -0
  18. diffusers/image_processor.py +884 -0
  19. diffusers/loaders/__init__.py +86 -0
  20. diffusers/loaders/__pycache__/__init__.cpython-38.pyc +0 -0
  21. diffusers/loaders/__pycache__/ip_adapter.cpython-38.pyc +0 -0
  22. diffusers/loaders/__pycache__/lora.cpython-38.pyc +0 -0
  23. diffusers/loaders/__pycache__/lora_conversion_utils.cpython-38.pyc +0 -0
  24. diffusers/loaders/__pycache__/peft.cpython-38.pyc +0 -0
  25. diffusers/loaders/__pycache__/single_file.cpython-38.pyc +0 -0
  26. diffusers/loaders/__pycache__/textual_inversion.cpython-38.pyc +0 -0
  27. diffusers/loaders/__pycache__/unet.cpython-38.pyc +0 -0
  28. diffusers/loaders/__pycache__/utils.cpython-38.pyc +0 -0
  29. diffusers/loaders/ip_adapter.py +190 -0
  30. diffusers/loaders/lora.py +1554 -0
  31. diffusers/loaders/lora_conversion_utils.py +284 -0
  32. diffusers/loaders/peft.py +186 -0
  33. diffusers/loaders/single_file.py +626 -0
  34. diffusers/loaders/textual_inversion.py +455 -0
  35. diffusers/loaders/unet.py +828 -0
  36. diffusers/loaders/utils.py +59 -0
  37. diffusers/models/README.md +3 -0
  38. diffusers/models/__init__.py +97 -0
  39. diffusers/models/__pycache__/__init__.cpython-38.pyc +0 -0
  40. diffusers/models/__pycache__/activations.cpython-38.pyc +0 -0
  41. diffusers/models/__pycache__/attention.cpython-38.pyc +0 -0
  42. diffusers/models/__pycache__/attention_processor.cpython-38.pyc +0 -0
  43. diffusers/models/__pycache__/downsampling.cpython-38.pyc +0 -0
  44. diffusers/models/__pycache__/dual_transformer_2d.cpython-38.pyc +0 -0
  45. diffusers/models/__pycache__/embeddings.cpython-38.pyc +0 -0
  46. diffusers/models/__pycache__/lora.cpython-38.pyc +0 -0
  47. diffusers/models/__pycache__/modeling_outputs.cpython-38.pyc +0 -0
  48. diffusers/models/__pycache__/modeling_utils.cpython-38.pyc +0 -0
  49. diffusers/models/__pycache__/normalization.cpython-38.pyc +0 -0
  50. diffusers/models/__pycache__/resnet.cpython-38.pyc +0 -0
diffusers/__init__.py ADDED
@@ -0,0 +1,760 @@
1
+ __version__ = "0.26.0.dev0"
2
+
3
+ from typing import TYPE_CHECKING
4
+
5
+ from .utils import (
6
+ DIFFUSERS_SLOW_IMPORT,
7
+ OptionalDependencyNotAvailable,
8
+ _LazyModule,
9
+ is_flax_available,
10
+ is_k_diffusion_available,
11
+ is_librosa_available,
12
+ is_note_seq_available,
13
+ is_onnx_available,
14
+ is_scipy_available,
15
+ is_torch_available,
16
+ is_torchsde_available,
17
+ is_transformers_available,
18
+ )
19
+
20
+
21
+ # Lazy Import based on
22
+ # https://github.com/huggingface/transformers/blob/main/src/transformers/__init__.py
23
+
24
+ # When adding a new object to this init, please add it to `_import_structure`. The `_import_structure` is a dictionary mapping submodules to lists of object names,
25
+ # and is used to defer the actual importing for when the objects are requested.
26
+ # This way `import diffusers` provides the names in the namespace without actually importing anything (and especially none of the backends).
27
+
28
+ _import_structure = {
29
+ "configuration_utils": ["ConfigMixin"],
30
+ "models": [],
31
+ "pipelines": [],
32
+ "schedulers": [],
33
+ "utils": [
34
+ "OptionalDependencyNotAvailable",
35
+ "is_flax_available",
36
+ "is_inflect_available",
37
+ "is_invisible_watermark_available",
38
+ "is_k_diffusion_available",
39
+ "is_k_diffusion_version",
40
+ "is_librosa_available",
41
+ "is_note_seq_available",
42
+ "is_onnx_available",
43
+ "is_scipy_available",
44
+ "is_torch_available",
45
+ "is_torchsde_available",
46
+ "is_transformers_available",
47
+ "is_transformers_version",
48
+ "is_unidecode_available",
49
+ "logging",
50
+ ],
51
+ }
52
+
53
+ try:
54
+ if not is_onnx_available():
55
+ raise OptionalDependencyNotAvailable()
56
+ except OptionalDependencyNotAvailable:
57
+ from .utils import dummy_onnx_objects # noqa F403
58
+
59
+ _import_structure["utils.dummy_onnx_objects"] = [
60
+ name for name in dir(dummy_onnx_objects) if not name.startswith("_")
61
+ ]
62
+
63
+ else:
64
+ _import_structure["pipelines"].extend(["OnnxRuntimeModel"])
65
+
66
+ try:
67
+ if not is_torch_available():
68
+ raise OptionalDependencyNotAvailable()
69
+ except OptionalDependencyNotAvailable:
70
+ from .utils import dummy_pt_objects # noqa F403
71
+
72
+ _import_structure["utils.dummy_pt_objects"] = [name for name in dir(dummy_pt_objects) if not name.startswith("_")]
73
+
74
+ else:
75
+ _import_structure["models"].extend(
76
+ [
77
+ "AsymmetricAutoencoderKL",
78
+ "AutoencoderKL",
79
+ "AutoencoderKLTemporalDecoder",
80
+ "AutoencoderTiny",
81
+ "ConsistencyDecoderVAE",
82
+ "ControlNetModel",
83
+ "Kandinsky3UNet",
84
+ "ModelMixin",
85
+ "MotionAdapter",
86
+ "MultiAdapter",
87
+ "PriorTransformer",
88
+ "T2IAdapter",
89
+ "T5FilmDecoder",
90
+ "Transformer2DModel",
91
+ "UNet1DModel",
92
+ "UNet2DConditionModel",
93
+ "UNet2DModel",
94
+ "UNet3DConditionModel",
95
+ "UNetMotionModel",
96
+ "UNetSpatioTemporalConditionModel",
97
+ "UVit2DModel",
98
+ "VQModel",
99
+ ]
100
+ )
101
+
102
+ _import_structure["optimization"] = [
103
+ "get_constant_schedule",
104
+ "get_constant_schedule_with_warmup",
105
+ "get_cosine_schedule_with_warmup",
106
+ "get_cosine_with_hard_restarts_schedule_with_warmup",
107
+ "get_linear_schedule_with_warmup",
108
+ "get_polynomial_decay_schedule_with_warmup",
109
+ "get_scheduler",
110
+ ]
111
+ _import_structure["pipelines"].extend(
112
+ [
113
+ "AudioPipelineOutput",
114
+ "AutoPipelineForImage2Image",
115
+ "AutoPipelineForInpainting",
116
+ "AutoPipelineForText2Image",
117
+ "ConsistencyModelPipeline",
118
+ "DanceDiffusionPipeline",
119
+ "DDIMPipeline",
120
+ "DDPMPipeline",
121
+ "DiffusionPipeline",
122
+ "DiTPipeline",
123
+ "ImagePipelineOutput",
124
+ "KarrasVePipeline",
125
+ "LDMPipeline",
126
+ "LDMSuperResolutionPipeline",
127
+ "PNDMPipeline",
128
+ "RePaintPipeline",
129
+ "ScoreSdeVePipeline",
130
+ ]
131
+ )
132
+ _import_structure["schedulers"].extend(
133
+ [
134
+ "AmusedScheduler",
135
+ "CMStochasticIterativeScheduler",
136
+ "DDIMInverseScheduler",
137
+ "DDIMParallelScheduler",
138
+ "DDIMScheduler",
139
+ "DDPMParallelScheduler",
140
+ "DDPMScheduler",
141
+ "DDPMWuerstchenScheduler",
142
+ "DEISMultistepScheduler",
143
+ "DPMSolverMultistepInverseScheduler",
144
+ "DPMSolverMultistepScheduler",
145
+ "DPMSolverSinglestepScheduler",
146
+ "EulerAncestralDiscreteScheduler",
147
+ "EulerDiscreteScheduler",
148
+ "HeunDiscreteScheduler",
149
+ "IPNDMScheduler",
150
+ "KarrasVeScheduler",
151
+ "KDPM2AncestralDiscreteScheduler",
152
+ "KDPM2DiscreteScheduler",
153
+ "LCMScheduler",
154
+ "PNDMScheduler",
155
+ "RePaintScheduler",
156
+ "SASolverScheduler",
157
+ "SchedulerMixin",
158
+ "ScoreSdeVeScheduler",
159
+ "UnCLIPScheduler",
160
+ "UniPCMultistepScheduler",
161
+ "VQDiffusionScheduler",
162
+ ]
163
+ )
164
+ _import_structure["training_utils"] = ["EMAModel"]
165
+
166
+ try:
167
+ if not (is_torch_available() and is_scipy_available()):
168
+ raise OptionalDependencyNotAvailable()
169
+ except OptionalDependencyNotAvailable:
170
+ from .utils import dummy_torch_and_scipy_objects # noqa F403
171
+
172
+ _import_structure["utils.dummy_torch_and_scipy_objects"] = [
173
+ name for name in dir(dummy_torch_and_scipy_objects) if not name.startswith("_")
174
+ ]
175
+
176
+ else:
177
+ _import_structure["schedulers"].extend(["LMSDiscreteScheduler"])
178
+
179
+ try:
180
+ if not (is_torch_available() and is_torchsde_available()):
181
+ raise OptionalDependencyNotAvailable()
182
+ except OptionalDependencyNotAvailable:
183
+ from .utils import dummy_torch_and_torchsde_objects # noqa F403
184
+
185
+ _import_structure["utils.dummy_torch_and_torchsde_objects"] = [
186
+ name for name in dir(dummy_torch_and_torchsde_objects) if not name.startswith("_")
187
+ ]
188
+
189
+ else:
190
+ _import_structure["schedulers"].extend(["DPMSolverSDEScheduler"])
191
+
192
+ try:
193
+ if not (is_torch_available() and is_transformers_available()):
194
+ raise OptionalDependencyNotAvailable()
195
+ except OptionalDependencyNotAvailable:
196
+ from .utils import dummy_torch_and_transformers_objects # noqa F403
197
+
198
+ _import_structure["utils.dummy_torch_and_transformers_objects"] = [
199
+ name for name in dir(dummy_torch_and_transformers_objects) if not name.startswith("_")
200
+ ]
201
+
202
+ else:
203
+ _import_structure["pipelines"].extend(
204
+ [
205
+ "AltDiffusionImg2ImgPipeline",
206
+ "AltDiffusionPipeline",
207
+ "AmusedImg2ImgPipeline",
208
+ "AmusedInpaintPipeline",
209
+ "AmusedPipeline",
210
+ "AnimateDiffPipeline",
211
+ "AudioLDM2Pipeline",
212
+ "AudioLDM2ProjectionModel",
213
+ "AudioLDM2UNet2DConditionModel",
214
+ "AudioLDMPipeline",
215
+ "BlipDiffusionControlNetPipeline",
216
+ "BlipDiffusionPipeline",
217
+ "CLIPImageProjection",
218
+ "CycleDiffusionPipeline",
219
+ "IFImg2ImgPipeline",
220
+ "IFImg2ImgSuperResolutionPipeline",
221
+ "IFInpaintingPipeline",
222
+ "IFInpaintingSuperResolutionPipeline",
223
+ "IFPipeline",
224
+ "IFSuperResolutionPipeline",
225
+ "ImageTextPipelineOutput",
226
+ "Kandinsky3Img2ImgPipeline",
227
+ "Kandinsky3Pipeline",
228
+ "KandinskyCombinedPipeline",
229
+ "KandinskyImg2ImgCombinedPipeline",
230
+ "KandinskyImg2ImgPipeline",
231
+ "KandinskyInpaintCombinedPipeline",
232
+ "KandinskyInpaintPipeline",
233
+ "KandinskyPipeline",
234
+ "KandinskyPriorPipeline",
235
+ "KandinskyV22CombinedPipeline",
236
+ "KandinskyV22ControlnetImg2ImgPipeline",
237
+ "KandinskyV22ControlnetPipeline",
238
+ "KandinskyV22Img2ImgCombinedPipeline",
239
+ "KandinskyV22Img2ImgPipeline",
240
+ "KandinskyV22InpaintCombinedPipeline",
241
+ "KandinskyV22InpaintPipeline",
242
+ "KandinskyV22Pipeline",
243
+ "KandinskyV22PriorEmb2EmbPipeline",
244
+ "KandinskyV22PriorPipeline",
245
+ "LatentConsistencyModelImg2ImgPipeline",
246
+ "LatentConsistencyModelPipeline",
247
+ "LDMTextToImagePipeline",
248
+ "MusicLDMPipeline",
249
+ "PaintByExamplePipeline",
250
+ "PixArtAlphaPipeline",
251
+ "SemanticStableDiffusionPipeline",
252
+ "ShapEImg2ImgPipeline",
253
+ "ShapEPipeline",
254
+ "StableDiffusionAdapterPipeline",
255
+ "StableDiffusionAttendAndExcitePipeline",
256
+ "StableDiffusionControlNetImg2ImgPipeline",
257
+ "StableDiffusionControlNetInpaintPipeline",
258
+ "StableDiffusionControlNetPipeline",
259
+ "StableDiffusionDepth2ImgPipeline",
260
+ "StableDiffusionDiffEditPipeline",
261
+ "StableDiffusionGLIGENPipeline",
262
+ "StableDiffusionGLIGENTextImagePipeline",
263
+ "StableDiffusionImageVariationPipeline",
264
+ "StableDiffusionImg2ImgPipeline",
265
+ "StableDiffusionInpaintPipeline",
266
+ "StableDiffusionInpaintPipelineLegacy",
267
+ "StableDiffusionInstructPix2PixPipeline",
268
+ "StableDiffusionLatentUpscalePipeline",
269
+ "StableDiffusionLDM3DPipeline",
270
+ "StableDiffusionModelEditingPipeline",
271
+ "StableDiffusionPanoramaPipeline",
272
+ "StableDiffusionParadigmsPipeline",
273
+ "StableDiffusionPipeline",
274
+ "StableDiffusionPipelineSafe",
275
+ "StableDiffusionPix2PixZeroPipeline",
276
+ "StableDiffusionSAGPipeline",
277
+ "StableDiffusionUpscalePipeline",
278
+ "StableDiffusionXLAdapterPipeline",
279
+ "StableDiffusionXLControlNetImg2ImgPipeline",
280
+ "StableDiffusionXLControlNetInpaintPipeline",
281
+ "StableDiffusionXLControlNetPipeline",
282
+ "StableDiffusionXLImg2ImgPipeline",
283
+ "StableDiffusionXLInpaintPipeline",
284
+ "StableDiffusionXLInstructPix2PixPipeline",
285
+ "StableDiffusionXLPipeline",
286
+ "StableUnCLIPImg2ImgPipeline",
287
+ "StableUnCLIPPipeline",
288
+ "StableVideoDiffusionPipeline",
289
+ "TextToVideoSDPipeline",
290
+ "TextToVideoZeroPipeline",
291
+ "TextToVideoZeroSDXLPipeline",
292
+ "UnCLIPImageVariationPipeline",
293
+ "UnCLIPPipeline",
294
+ "UniDiffuserModel",
295
+ "UniDiffuserPipeline",
296
+ "UniDiffuserTextDecoder",
297
+ "VersatileDiffusionDualGuidedPipeline",
298
+ "VersatileDiffusionImageVariationPipeline",
299
+ "VersatileDiffusionPipeline",
300
+ "VersatileDiffusionTextToImagePipeline",
301
+ "VideoToVideoSDPipeline",
302
+ "VQDiffusionPipeline",
303
+ "WuerstchenCombinedPipeline",
304
+ "WuerstchenDecoderPipeline",
305
+ "WuerstchenPriorPipeline",
306
+ ]
307
+ )
308
+
309
+ try:
310
+ if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
311
+ raise OptionalDependencyNotAvailable()
312
+ except OptionalDependencyNotAvailable:
313
+ from .utils import dummy_torch_and_transformers_and_k_diffusion_objects # noqa F403
314
+
315
+ _import_structure["utils.dummy_torch_and_transformers_and_k_diffusion_objects"] = [
316
+ name for name in dir(dummy_torch_and_transformers_and_k_diffusion_objects) if not name.startswith("_")
317
+ ]
318
+
319
+ else:
320
+ _import_structure["pipelines"].extend(["StableDiffusionKDiffusionPipeline", "StableDiffusionXLKDiffusionPipeline"])
321
+
322
+ try:
323
+ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
324
+ raise OptionalDependencyNotAvailable()
325
+ except OptionalDependencyNotAvailable:
326
+ from .utils import dummy_torch_and_transformers_and_onnx_objects # noqa F403
327
+
328
+ _import_structure["utils.dummy_torch_and_transformers_and_onnx_objects"] = [
329
+ name for name in dir(dummy_torch_and_transformers_and_onnx_objects) if not name.startswith("_")
330
+ ]
331
+
332
+ else:
333
+ _import_structure["pipelines"].extend(
334
+ [
335
+ "OnnxStableDiffusionImg2ImgPipeline",
336
+ "OnnxStableDiffusionInpaintPipeline",
337
+ "OnnxStableDiffusionInpaintPipelineLegacy",
338
+ "OnnxStableDiffusionPipeline",
339
+ "OnnxStableDiffusionUpscalePipeline",
340
+ "StableDiffusionOnnxPipeline",
341
+ ]
342
+ )
343
+
344
+ try:
345
+ if not (is_torch_available() and is_librosa_available()):
346
+ raise OptionalDependencyNotAvailable()
347
+ except OptionalDependencyNotAvailable:
348
+ from .utils import dummy_torch_and_librosa_objects # noqa F403
349
+
350
+ _import_structure["utils.dummy_torch_and_librosa_objects"] = [
351
+ name for name in dir(dummy_torch_and_librosa_objects) if not name.startswith("_")
352
+ ]
353
+
354
+ else:
355
+ _import_structure["pipelines"].extend(["AudioDiffusionPipeline", "Mel"])
356
+
357
+ try:
358
+ if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
359
+ raise OptionalDependencyNotAvailable()
360
+ except OptionalDependencyNotAvailable:
361
+ from .utils import dummy_transformers_and_torch_and_note_seq_objects # noqa F403
362
+
363
+ _import_structure["utils.dummy_transformers_and_torch_and_note_seq_objects"] = [
364
+ name for name in dir(dummy_transformers_and_torch_and_note_seq_objects) if not name.startswith("_")
365
+ ]
366
+
367
+
368
+ else:
369
+ _import_structure["pipelines"].extend(["SpectrogramDiffusionPipeline"])
370
+
371
+ try:
372
+ if not is_flax_available():
373
+ raise OptionalDependencyNotAvailable()
374
+ except OptionalDependencyNotAvailable:
375
+ from .utils import dummy_flax_objects # noqa F403
376
+
377
+ _import_structure["utils.dummy_flax_objects"] = [
378
+ name for name in dir(dummy_flax_objects) if not name.startswith("_")
379
+ ]
380
+
381
+
382
+ else:
383
+ _import_structure["models.controlnet_flax"] = ["FlaxControlNetModel"]
384
+ _import_structure["models.modeling_flax_utils"] = ["FlaxModelMixin"]
385
+ _import_structure["models.unets.unet_2d_condition_flax"] = ["FlaxUNet2DConditionModel"]
386
+ _import_structure["models.vae_flax"] = ["FlaxAutoencoderKL"]
387
+ _import_structure["pipelines"].extend(["FlaxDiffusionPipeline"])
388
+ _import_structure["schedulers"].extend(
389
+ [
390
+ "FlaxDDIMScheduler",
391
+ "FlaxDDPMScheduler",
392
+ "FlaxDPMSolverMultistepScheduler",
393
+ "FlaxEulerDiscreteScheduler",
394
+ "FlaxKarrasVeScheduler",
395
+ "FlaxLMSDiscreteScheduler",
396
+ "FlaxPNDMScheduler",
397
+ "FlaxSchedulerMixin",
398
+ "FlaxScoreSdeVeScheduler",
399
+ ]
400
+ )
401
+
402
+
403
+ try:
404
+ if not (is_flax_available() and is_transformers_available()):
405
+ raise OptionalDependencyNotAvailable()
406
+ except OptionalDependencyNotAvailable:
407
+ from .utils import dummy_flax_and_transformers_objects # noqa F403
408
+
409
+ _import_structure["utils.dummy_flax_and_transformers_objects"] = [
410
+ name for name in dir(dummy_flax_and_transformers_objects) if not name.startswith("_")
411
+ ]
412
+
413
+
414
+ else:
415
+ _import_structure["pipelines"].extend(
416
+ [
417
+ "FlaxStableDiffusionControlNetPipeline",
418
+ "FlaxStableDiffusionImg2ImgPipeline",
419
+ "FlaxStableDiffusionInpaintPipeline",
420
+ "FlaxStableDiffusionPipeline",
421
+ "FlaxStableDiffusionXLPipeline",
422
+ ]
423
+ )
424
+
425
+ try:
426
+ if not (is_note_seq_available()):
427
+ raise OptionalDependencyNotAvailable()
428
+ except OptionalDependencyNotAvailable:
429
+ from .utils import dummy_note_seq_objects # noqa F403
430
+
431
+ _import_structure["utils.dummy_note_seq_objects"] = [
432
+ name for name in dir(dummy_note_seq_objects) if not name.startswith("_")
433
+ ]
434
+
435
+
436
+ else:
437
+ _import_structure["pipelines"].extend(["MidiProcessor"])
438
+
439
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
440
+ from .configuration_utils import ConfigMixin
441
+
442
+ try:
443
+ if not is_onnx_available():
444
+ raise OptionalDependencyNotAvailable()
445
+ except OptionalDependencyNotAvailable:
446
+ from .utils.dummy_onnx_objects import * # noqa F403
447
+ else:
448
+ from .pipelines import OnnxRuntimeModel
449
+
450
+ try:
451
+ if not is_torch_available():
452
+ raise OptionalDependencyNotAvailable()
453
+ except OptionalDependencyNotAvailable:
454
+ from .utils.dummy_pt_objects import * # noqa F403
455
+ else:
456
+ from .models import (
457
+ AsymmetricAutoencoderKL,
458
+ AutoencoderKL,
459
+ AutoencoderKLTemporalDecoder,
460
+ AutoencoderTiny,
461
+ ConsistencyDecoderVAE,
462
+ ControlNetModel,
463
+ Kandinsky3UNet,
464
+ ModelMixin,
465
+ MotionAdapter,
466
+ MultiAdapter,
467
+ PriorTransformer,
468
+ T2IAdapter,
469
+ T5FilmDecoder,
470
+ Transformer2DModel,
471
+ UNet1DModel,
472
+ UNet2DConditionModel,
473
+ UNet2DModel,
474
+ UNet3DConditionModel,
475
+ UNetMotionModel,
476
+ UNetSpatioTemporalConditionModel,
477
+ UVit2DModel,
478
+ VQModel,
479
+ )
480
+ from .optimization import (
481
+ get_constant_schedule,
482
+ get_constant_schedule_with_warmup,
483
+ get_cosine_schedule_with_warmup,
484
+ get_cosine_with_hard_restarts_schedule_with_warmup,
485
+ get_linear_schedule_with_warmup,
486
+ get_polynomial_decay_schedule_with_warmup,
487
+ get_scheduler,
488
+ )
489
+ from .pipelines import (
490
+ AudioPipelineOutput,
491
+ AutoPipelineForImage2Image,
492
+ AutoPipelineForInpainting,
493
+ AutoPipelineForText2Image,
494
+ BlipDiffusionControlNetPipeline,
495
+ BlipDiffusionPipeline,
496
+ CLIPImageProjection,
497
+ ConsistencyModelPipeline,
498
+ DanceDiffusionPipeline,
499
+ DDIMPipeline,
500
+ DDPMPipeline,
501
+ DiffusionPipeline,
502
+ DiTPipeline,
503
+ ImagePipelineOutput,
504
+ KarrasVePipeline,
505
+ LDMPipeline,
506
+ LDMSuperResolutionPipeline,
507
+ PNDMPipeline,
508
+ RePaintPipeline,
509
+ ScoreSdeVePipeline,
510
+ )
511
+ from .schedulers import (
512
+ AmusedScheduler,
513
+ CMStochasticIterativeScheduler,
514
+ DDIMInverseScheduler,
515
+ DDIMParallelScheduler,
516
+ DDIMScheduler,
517
+ DDPMParallelScheduler,
518
+ DDPMScheduler,
519
+ DDPMWuerstchenScheduler,
520
+ DEISMultistepScheduler,
521
+ DPMSolverMultistepInverseScheduler,
522
+ DPMSolverMultistepScheduler,
523
+ DPMSolverSinglestepScheduler,
524
+ EulerAncestralDiscreteScheduler,
525
+ EulerDiscreteScheduler,
526
+ HeunDiscreteScheduler,
527
+ IPNDMScheduler,
528
+ KarrasVeScheduler,
529
+ KDPM2AncestralDiscreteScheduler,
530
+ KDPM2DiscreteScheduler,
531
+ LCMScheduler,
532
+ PNDMScheduler,
533
+ RePaintScheduler,
534
+ SASolverScheduler,
535
+ SchedulerMixin,
536
+ ScoreSdeVeScheduler,
537
+ UnCLIPScheduler,
538
+ UniPCMultistepScheduler,
539
+ VQDiffusionScheduler,
540
+ )
541
+ from .training_utils import EMAModel
542
+
543
+ try:
544
+ if not (is_torch_available() and is_scipy_available()):
545
+ raise OptionalDependencyNotAvailable()
546
+ except OptionalDependencyNotAvailable:
547
+ from .utils.dummy_torch_and_scipy_objects import * # noqa F403
548
+ else:
549
+ from .schedulers import LMSDiscreteScheduler
550
+
551
+ try:
552
+ if not (is_torch_available() and is_torchsde_available()):
553
+ raise OptionalDependencyNotAvailable()
554
+ except OptionalDependencyNotAvailable:
555
+ from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
556
+ else:
557
+ from .schedulers import DPMSolverSDEScheduler
558
+
559
+ try:
560
+ if not (is_torch_available() and is_transformers_available()):
561
+ raise OptionalDependencyNotAvailable()
562
+ except OptionalDependencyNotAvailable:
563
+ from .utils.dummy_torch_and_transformers_objects import * # noqa F403
564
+ else:
565
+ from .pipelines import (
566
+ AltDiffusionImg2ImgPipeline,
567
+ AltDiffusionPipeline,
568
+ AmusedImg2ImgPipeline,
569
+ AmusedInpaintPipeline,
570
+ AmusedPipeline,
571
+ AnimateDiffPipeline,
572
+ AudioLDM2Pipeline,
573
+ AudioLDM2ProjectionModel,
574
+ AudioLDM2UNet2DConditionModel,
575
+ AudioLDMPipeline,
576
+ CLIPImageProjection,
577
+ CycleDiffusionPipeline,
578
+ IFImg2ImgPipeline,
579
+ IFImg2ImgSuperResolutionPipeline,
580
+ IFInpaintingPipeline,
581
+ IFInpaintingSuperResolutionPipeline,
582
+ IFPipeline,
583
+ IFSuperResolutionPipeline,
584
+ ImageTextPipelineOutput,
585
+ Kandinsky3Img2ImgPipeline,
586
+ Kandinsky3Pipeline,
587
+ KandinskyCombinedPipeline,
588
+ KandinskyImg2ImgCombinedPipeline,
589
+ KandinskyImg2ImgPipeline,
590
+ KandinskyInpaintCombinedPipeline,
591
+ KandinskyInpaintPipeline,
592
+ KandinskyPipeline,
593
+ KandinskyPriorPipeline,
594
+ KandinskyV22CombinedPipeline,
595
+ KandinskyV22ControlnetImg2ImgPipeline,
596
+ KandinskyV22ControlnetPipeline,
597
+ KandinskyV22Img2ImgCombinedPipeline,
598
+ KandinskyV22Img2ImgPipeline,
599
+ KandinskyV22InpaintCombinedPipeline,
600
+ KandinskyV22InpaintPipeline,
601
+ KandinskyV22Pipeline,
602
+ KandinskyV22PriorEmb2EmbPipeline,
603
+ KandinskyV22PriorPipeline,
604
+ LatentConsistencyModelImg2ImgPipeline,
605
+ LatentConsistencyModelPipeline,
606
+ LDMTextToImagePipeline,
607
+ MusicLDMPipeline,
608
+ PaintByExamplePipeline,
609
+ PixArtAlphaPipeline,
610
+ SemanticStableDiffusionPipeline,
611
+ ShapEImg2ImgPipeline,
612
+ ShapEPipeline,
613
+ StableDiffusionAdapterPipeline,
614
+ StableDiffusionAttendAndExcitePipeline,
615
+ StableDiffusionControlNetImg2ImgPipeline,
616
+ StableDiffusionControlNetInpaintPipeline,
617
+ StableDiffusionControlNetPipeline,
618
+ StableDiffusionDepth2ImgPipeline,
619
+ StableDiffusionDiffEditPipeline,
620
+ StableDiffusionGLIGENPipeline,
621
+ StableDiffusionGLIGENTextImagePipeline,
622
+ StableDiffusionImageVariationPipeline,
623
+ StableDiffusionImg2ImgPipeline,
624
+ StableDiffusionInpaintPipeline,
625
+ StableDiffusionInpaintPipelineLegacy,
626
+ StableDiffusionInstructPix2PixPipeline,
627
+ StableDiffusionLatentUpscalePipeline,
628
+ StableDiffusionLDM3DPipeline,
629
+ StableDiffusionModelEditingPipeline,
630
+ StableDiffusionPanoramaPipeline,
631
+ StableDiffusionParadigmsPipeline,
632
+ StableDiffusionPipeline,
633
+ StableDiffusionPipelineSafe,
634
+ StableDiffusionPix2PixZeroPipeline,
635
+ StableDiffusionSAGPipeline,
636
+ StableDiffusionUpscalePipeline,
637
+ StableDiffusionXLAdapterPipeline,
638
+ StableDiffusionXLControlNetImg2ImgPipeline,
639
+ StableDiffusionXLControlNetInpaintPipeline,
640
+ StableDiffusionXLControlNetPipeline,
641
+ StableDiffusionXLImg2ImgPipeline,
642
+ StableDiffusionXLInpaintPipeline,
643
+ StableDiffusionXLInstructPix2PixPipeline,
644
+ StableDiffusionXLPipeline,
645
+ StableUnCLIPImg2ImgPipeline,
646
+ StableUnCLIPPipeline,
647
+ StableVideoDiffusionPipeline,
648
+ TextToVideoSDPipeline,
649
+ TextToVideoZeroPipeline,
650
+ TextToVideoZeroSDXLPipeline,
651
+ UnCLIPImageVariationPipeline,
652
+ UnCLIPPipeline,
653
+ UniDiffuserModel,
654
+ UniDiffuserPipeline,
655
+ UniDiffuserTextDecoder,
656
+ VersatileDiffusionDualGuidedPipeline,
657
+ VersatileDiffusionImageVariationPipeline,
658
+ VersatileDiffusionPipeline,
659
+ VersatileDiffusionTextToImagePipeline,
660
+ VideoToVideoSDPipeline,
661
+ VQDiffusionPipeline,
662
+ WuerstchenCombinedPipeline,
663
+ WuerstchenDecoderPipeline,
664
+ WuerstchenPriorPipeline,
665
+ )
666
+
667
+ try:
668
+ if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
669
+ raise OptionalDependencyNotAvailable()
670
+ except OptionalDependencyNotAvailable:
671
+ from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
672
+ else:
673
+ from .pipelines import StableDiffusionKDiffusionPipeline, StableDiffusionXLKDiffusionPipeline
674
+
675
+ try:
676
+ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
677
+ raise OptionalDependencyNotAvailable()
678
+ except OptionalDependencyNotAvailable:
679
+ from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
680
+ else:
681
+ from .pipelines import (
682
+ OnnxStableDiffusionImg2ImgPipeline,
683
+ OnnxStableDiffusionInpaintPipeline,
684
+ OnnxStableDiffusionInpaintPipelineLegacy,
685
+ OnnxStableDiffusionPipeline,
686
+ OnnxStableDiffusionUpscalePipeline,
687
+ StableDiffusionOnnxPipeline,
688
+ )
689
+
690
+ try:
691
+ if not (is_torch_available() and is_librosa_available()):
692
+ raise OptionalDependencyNotAvailable()
693
+ except OptionalDependencyNotAvailable:
694
+ from .utils.dummy_torch_and_librosa_objects import * # noqa F403
695
+ else:
696
+ from .pipelines import AudioDiffusionPipeline, Mel
697
+
698
+ try:
699
+ if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
700
+ raise OptionalDependencyNotAvailable()
701
+ except OptionalDependencyNotAvailable:
702
+ from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
703
+ else:
704
+ from .pipelines import SpectrogramDiffusionPipeline
705
+
706
+ try:
707
+ if not is_flax_available():
708
+ raise OptionalDependencyNotAvailable()
709
+ except OptionalDependencyNotAvailable:
710
+ from .utils.dummy_flax_objects import * # noqa F403
711
+ else:
712
+ from .models.controlnet_flax import FlaxControlNetModel
713
+ from .models.modeling_flax_utils import FlaxModelMixin
714
+ from .models.unets.unet_2d_condition_flax import FlaxUNet2DConditionModel
715
+ from .models.vae_flax import FlaxAutoencoderKL
716
+ from .pipelines import FlaxDiffusionPipeline
717
+ from .schedulers import (
718
+ FlaxDDIMScheduler,
719
+ FlaxDDPMScheduler,
720
+ FlaxDPMSolverMultistepScheduler,
721
+ FlaxEulerDiscreteScheduler,
722
+ FlaxKarrasVeScheduler,
723
+ FlaxLMSDiscreteScheduler,
724
+ FlaxPNDMScheduler,
725
+ FlaxSchedulerMixin,
726
+ FlaxScoreSdeVeScheduler,
727
+ )
728
+
729
+ try:
730
+ if not (is_flax_available() and is_transformers_available()):
731
+ raise OptionalDependencyNotAvailable()
732
+ except OptionalDependencyNotAvailable:
733
+ from .utils.dummy_flax_and_transformers_objects import * # noqa F403
734
+ else:
735
+ from .pipelines import (
736
+ FlaxStableDiffusionControlNetPipeline,
737
+ FlaxStableDiffusionImg2ImgPipeline,
738
+ FlaxStableDiffusionInpaintPipeline,
739
+ FlaxStableDiffusionPipeline,
740
+ FlaxStableDiffusionXLPipeline,
741
+ )
742
+
743
+ try:
744
+ if not (is_note_seq_available()):
745
+ raise OptionalDependencyNotAvailable()
746
+ except OptionalDependencyNotAvailable:
747
+ from .utils.dummy_note_seq_objects import * # noqa F403
748
+ else:
749
+ from .pipelines import MidiProcessor
750
+
751
+ else:
752
+ import sys
753
+
754
+ sys.modules[__name__] = _LazyModule(
755
+ __name__,
756
+ globals()["__file__"],
757
+ _import_structure,
758
+ module_spec=__spec__,
759
+ extra_objects={"__version__": __version__},
760
+ )
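
For readers skimming the diff: the `_import_structure` dictionary above only records names, and the final `sys.modules[__name__] = _LazyModule(...)` assignment defers the real imports until an attribute is first accessed. A minimal, hypothetical sketch of that pattern (the actual implementation lives in `diffusers.utils._LazyModule`, not this class):

import importlib
import types


class LazyModuleSketch(types.ModuleType):
    # Illustrative stand-in for diffusers.utils._LazyModule: submodule imports are
    # deferred until an exported name is first accessed.
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported object name back to the submodule that defines it.
        self._object_to_module = {
            obj: module for module, objects in import_structure.items() for obj in objects
        }
        self.__all__ = list(self._object_to_module)

    def __getattr__(self, name):
        if name not in self._object_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._object_to_module[name]}")
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so the submodule import happens only once
        return value

Under this scheme, `import diffusers` stays cheap, and an access such as `diffusers.UNet2DConditionModel` only pulls in the relevant submodule (and its backend) at that point.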
diffusers/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (16.2 kB).
 
diffusers/__pycache__/configuration_utils.cpython-38.pyc ADDED
Binary file (24.2 kB).
 
diffusers/__pycache__/dependency_versions_check.cpython-38.pyc ADDED
Binary file (657 Bytes).
 
diffusers/__pycache__/dependency_versions_table.cpython-38.pyc ADDED
Binary file (1.25 kB).
 
diffusers/__pycache__/image_processor.cpython-38.pyc ADDED
Binary file (27.3 kB).
 
diffusers/commands/__init__.py ADDED
@@ -0,0 +1,27 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseDiffusersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
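
The two abstract hooks above are all a subcommand needs to implement. A minimal, hypothetical subclass (not part of this upload), mirroring the pattern `EnvironmentCommand` uses further down:

from argparse import ArgumentParser

from diffusers.commands import BaseDiffusersCLICommand


class HelloCommand(BaseDiffusersCLICommand):
    # Hypothetical example command used only for illustration.
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello")
        # The factory receives the parsed args and returns the command instance.
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("Hello from diffusers-cli!")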
diffusers/commands/diffusers_cli.py ADDED
@@ -0,0 +1,43 @@
#!/usr/bin/env python
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser

from .env import EnvironmentCommand
from .fp16_safetensors import FP16SafetensorsCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)
    FP16SafetensorsCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
diffusers/commands/env.py ADDED
@@ -0,0 +1,84 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
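
As a quick sanity check, the command can also be driven directly from Python rather than through the `diffusers-cli` entry point; `run()` both prints the report and returns it as a dict (sketch, assuming `diffusers` is installed):

from diffusers.commands.env import EnvironmentCommand

# Prints the environment report and returns it as a plain dict.
info = EnvironmentCommand().run()
print(info["Platform"])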
diffusers/commands/fp16_safetensors.py ADDED
@@ -0,0 +1,132 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Usage example:
    diffusers-cli fp16_safetensors --ckpt_id=openai/shap-e --fp16 --use_safetensors
"""

import glob
import json
import warnings
from argparse import ArgumentParser, Namespace
from importlib import import_module

import huggingface_hub
import torch
from huggingface_hub import hf_hub_download
from packaging import version

from ..utils import logging
from . import BaseDiffusersCLICommand


def conversion_command_factory(args: Namespace):
    if args.use_auth_token:
        warnings.warn(
            "The `--use_auth_token` flag is deprecated and will be removed in a future version. Authentication is now"
            " handled automatically if user is logged in."
        )
    return FP16SafetensorsCommand(args.ckpt_id, args.fp16, args.use_safetensors)


class FP16SafetensorsCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        conversion_parser = parser.add_parser("fp16_safetensors")
        conversion_parser.add_argument(
            "--ckpt_id",
            type=str,
            help="Repo id of the checkpoints on which to run the conversion. Example: 'openai/shap-e'.",
        )
        conversion_parser.add_argument(
            "--fp16", action="store_true", help="If serializing the variables in FP16 precision."
        )
        conversion_parser.add_argument(
            "--use_safetensors", action="store_true", help="If serializing in the safetensors format."
        )
        conversion_parser.add_argument(
            "--use_auth_token",
            action="store_true",
            help="When working with checkpoints having private visibility. When used `huggingface-cli login` needs to be run beforehand.",
        )
        conversion_parser.set_defaults(func=conversion_command_factory)

    def __init__(self, ckpt_id: str, fp16: bool, use_safetensors: bool):
        self.logger = logging.get_logger("diffusers-cli/fp16_safetensors")
        self.ckpt_id = ckpt_id
        self.local_ckpt_dir = f"/tmp/{ckpt_id}"
        self.fp16 = fp16

        self.use_safetensors = use_safetensors

        if not self.use_safetensors and not self.fp16:
            raise NotImplementedError(
                "When `use_safetensors` and `fp16` both are False, then this command is of no use."
            )

    def run(self):
        if version.parse(huggingface_hub.__version__) < version.parse("0.9.0"):
            raise ImportError(
                "The huggingface_hub version must be >= 0.9.0 to use this command. Please update your huggingface_hub"
                " installation."
            )
        else:
            from huggingface_hub import create_commit
            from huggingface_hub._commit_api import CommitOperationAdd

        model_index = hf_hub_download(repo_id=self.ckpt_id, filename="model_index.json")
        with open(model_index, "r") as f:
            pipeline_class_name = json.load(f)["_class_name"]
        pipeline_class = getattr(import_module("diffusers"), pipeline_class_name)
        self.logger.info(f"Pipeline class imported: {pipeline_class_name}.")

        # Load the appropriate pipeline. We could have used `DiffusionPipeline`
        # here, but just to avoid any rough edge cases.
        pipeline = pipeline_class.from_pretrained(
            self.ckpt_id, torch_dtype=torch.float16 if self.fp16 else torch.float32
        )
        pipeline.save_pretrained(
            self.local_ckpt_dir,
            safe_serialization=True if self.use_safetensors else False,
            variant="fp16" if self.fp16 else None,
        )
        self.logger.info(f"Pipeline locally saved to {self.local_ckpt_dir}.")

        # Fetch all the paths.
        if self.fp16:
            modified_paths = glob.glob(f"{self.local_ckpt_dir}/*/*.fp16.*")
        elif self.use_safetensors:
            modified_paths = glob.glob(f"{self.local_ckpt_dir}/*/*.safetensors")

        # Prepare for the PR.
        commit_message = f"Serialize variables with FP16: {self.fp16} and safetensors: {self.use_safetensors}."
        operations = []
        for path in modified_paths:
            operations.append(CommitOperationAdd(path_in_repo="/".join(path.split("/")[4:]), path_or_fileobj=path))

        # Open the PR.
        commit_description = (
            "Variables converted by the [`diffusers`' `fp16_safetensors`"
            " CLI](https://github.com/huggingface/diffusers/blob/main/src/diffusers/commands/fp16_safetensors.py)."
        )
        hub_pr_url = create_commit(
            repo_id=self.ckpt_id,
            operations=operations,
            commit_message=commit_message,
            commit_description=commit_description,
            repo_type="model",
            create_pr=True,
        ).pr_url
        self.logger.info(f"PR created here: {hub_pr_url}.")
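
The same conversion can be triggered without the argparse layer by instantiating the command directly. An illustrative sketch (the repo id is only an example from the docstring above, and a Hub login is required for the PR step):

from diffusers.commands.fp16_safetensors import FP16SafetensorsCommand

# Convert the checkpoint to fp16 safetensors under /tmp/<ckpt_id> and open a Hub PR.
command = FP16SafetensorsCommand(ckpt_id="openai/shap-e", fp16=True, use_safetensors=True)
command.run()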
diffusers/configuration_utils.py ADDED
@@ -0,0 +1,699 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ ConfigMixin base class and utilities."""
17
+ import dataclasses
18
+ import functools
19
+ import importlib
20
+ import inspect
21
+ import json
22
+ import os
23
+ import re
24
+ from collections import OrderedDict
25
+ from pathlib import PosixPath
26
+ from typing import Any, Dict, Tuple, Union
27
+
28
+ import numpy as np
29
+ from huggingface_hub import create_repo, hf_hub_download
30
+ from huggingface_hub.utils import (
31
+ EntryNotFoundError,
32
+ RepositoryNotFoundError,
33
+ RevisionNotFoundError,
34
+ validate_hf_hub_args,
35
+ )
36
+ from requests import HTTPError
37
+
38
+ from . import __version__
39
+ from .utils import (
40
+ HUGGINGFACE_CO_RESOLVE_ENDPOINT,
41
+ DummyObject,
42
+ deprecate,
43
+ extract_commit_hash,
44
+ http_user_agent,
45
+ logging,
46
+ )
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ _re_configuration_file = re.compile(r"config\.(.*)\.json")
52
+
53
+
54
+ class FrozenDict(OrderedDict):
55
+ def __init__(self, *args, **kwargs):
56
+ super().__init__(*args, **kwargs)
57
+
58
+ for key, value in self.items():
59
+ setattr(self, key, value)
60
+
61
+ self.__frozen = True
62
+
63
+ def __delitem__(self, *args, **kwargs):
64
+ raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
65
+
66
+ def setdefault(self, *args, **kwargs):
67
+ raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
68
+
69
+ def pop(self, *args, **kwargs):
70
+ raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
71
+
72
+ def update(self, *args, **kwargs):
73
+ raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
74
+
75
+ def __setattr__(self, name, value):
76
+ if hasattr(self, "__frozen") and self.__frozen:
77
+ raise Exception(f"You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.")
78
+ super().__setattr__(name, value)
79
+
80
+ def __setitem__(self, name, value):
81
+ if hasattr(self, "__frozen") and self.__frozen:
82
+ raise Exception(f"You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.")
83
+ super().__setitem__(name, value)
84
+
85
+
86
+ class ConfigMixin:
87
+ r"""
88
+ Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also
89
+ provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and
90
+ saving classes that inherit from [`ConfigMixin`].
91
+
92
+ Class attributes:
93
+ - **config_name** (`str`) -- A filename under which the config should be stored when calling
94
+ [`~ConfigMixin.save_config`] (should be overridden by parent class).
95
+ - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be
96
+ overridden by subclass).
97
+ - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).
98
+ - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function
99
+ should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by
100
+ subclass).
101
+ """
102
+
103
+ config_name = None
104
+ ignore_for_config = []
105
+ has_compatibles = False
106
+
107
+ _deprecated_kwargs = []
108
+
109
+ def register_to_config(self, **kwargs):
110
+ if self.config_name is None:
111
+ raise NotImplementedError(f"Make sure that {self.__class__} has defined a class name `config_name`")
112
+ # Special case for `kwargs` used in deprecation warning added to schedulers
113
+ # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,
114
+ # or solve in a more general way.
115
+ kwargs.pop("kwargs", None)
116
+
117
+ if not hasattr(self, "_internal_dict"):
118
+ internal_dict = kwargs
119
+ else:
120
+ previous_dict = dict(self._internal_dict)
121
+ internal_dict = {**self._internal_dict, **kwargs}
122
+ logger.debug(f"Updating config from {previous_dict} to {internal_dict}")
123
+
124
+ self._internal_dict = FrozenDict(internal_dict)
125
+
126
+ def __getattr__(self, name: str) -> Any:
127
+ """The only reason we overwrite `getattr` here is to gracefully deprecate accessing
128
+ config attributes directly. See https://github.com/huggingface/diffusers/pull/3129
129
+
130
+ This function is mostly copied from PyTorch's __getattr__ overwrite:
131
+ https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module
132
+ """
133
+
134
+ is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name)
135
+ is_attribute = name in self.__dict__
136
+
137
+ if is_in_config and not is_attribute:
138
+ deprecation_message = f"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'."
139
+ deprecate("direct config name access", "1.0.0", deprecation_message, standard_warn=False)
140
+ return self._internal_dict[name]
141
+
142
+ raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
143
+
144
+ def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
145
+ """
146
+ Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the
147
+ [`~ConfigMixin.from_config`] class method.
148
+
149
+ Args:
150
+ save_directory (`str` or `os.PathLike`):
151
+ Directory where the configuration JSON file is saved (will be created if it does not exist).
152
+ push_to_hub (`bool`, *optional*, defaults to `False`):
153
+ Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the
154
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
155
+ namespace).
156
+ kwargs (`Dict[str, Any]`, *optional*):
157
+ Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
158
+ """
159
+ if os.path.isfile(save_directory):
160
+ raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
161
+
162
+ os.makedirs(save_directory, exist_ok=True)
163
+
164
+ # If we save using the predefined names, we can load using `from_config`
165
+ output_config_file = os.path.join(save_directory, self.config_name)
166
+
167
+ self.to_json_file(output_config_file)
168
+ logger.info(f"Configuration saved in {output_config_file}")
169
+
170
+ if push_to_hub:
171
+ commit_message = kwargs.pop("commit_message", None)
172
+ private = kwargs.pop("private", False)
173
+ create_pr = kwargs.pop("create_pr", False)
174
+ token = kwargs.pop("token", None)
175
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
176
+ repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id
177
+
178
+ self._upload_folder(
179
+ save_directory,
180
+ repo_id,
181
+ token=token,
182
+ commit_message=commit_message,
183
+ create_pr=create_pr,
184
+ )
185
+
186
+ @classmethod
187
+ def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):
188
+ r"""
189
+ Instantiate a Python class from a config dictionary.
190
+
191
+ Parameters:
192
+ config (`Dict[str, Any]`):
193
+ A config dictionary from which the Python class is instantiated. Make sure to only load configuration
194
+ files of compatible classes.
195
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
196
+ Whether kwargs that are not consumed by the Python class should be returned or not.
197
+ kwargs (remaining dictionary of keyword arguments, *optional*):
198
+ Can be used to update the configuration object (after it is loaded) and initiate the Python class.
199
+ `**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually
200
+ overwrite the same named arguments in `config`.
201
+
202
+ Returns:
203
+ [`ModelMixin`] or [`SchedulerMixin`]:
204
+ A model or scheduler object instantiated from a config dictionary.
205
+
206
+ Examples:
207
+
208
+ ```python
209
+ >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler
210
+
211
+ >>> # Download scheduler from huggingface.co and cache.
212
+ >>> scheduler = DDPMScheduler.from_pretrained("google/ddpm-cifar10-32")
213
+
214
+ >>> # Instantiate DDIM scheduler class with same config as DDPM
215
+ >>> scheduler = DDIMScheduler.from_config(scheduler.config)
216
+
217
+ >>> # Instantiate PNDM scheduler class with same config as DDPM
218
+ >>> scheduler = PNDMScheduler.from_config(scheduler.config)
219
+ ```
220
+ """
221
+ # <===== TO BE REMOVED WITH DEPRECATION
222
+ # TODO(Patrick) - make sure to remove the following lines when config=="model_path" is deprecated
223
+ if "pretrained_model_name_or_path" in kwargs:
224
+ config = kwargs.pop("pretrained_model_name_or_path")
225
+
226
+ if config is None:
227
+ raise ValueError("Please make sure to provide a config as the first positional argument.")
228
+ # ======>
229
+
230
+ if not isinstance(config, dict):
231
+ deprecation_message = "It is deprecated to pass a pretrained model name or path to `from_config`."
232
+ if "Scheduler" in cls.__name__:
233
+ deprecation_message += (
234
+ f"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead."
235
+ " Otherwise, please make sure to pass a configuration dictionary instead. This functionality will"
236
+ " be removed in v1.0.0."
237
+ )
238
+ elif "Model" in cls.__name__:
239
+ deprecation_message += (
240
+ f"If you were trying to load a model, please use {cls}.load_config(...) followed by"
241
+ f" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary"
242
+ " instead. This functionality will be removed in v1.0.0."
243
+ )
244
+ deprecate("config-passed-as-path", "1.0.0", deprecation_message, standard_warn=False)
245
+ config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)
246
+
247
+ init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)
248
+
249
+ # Allow dtype to be specified on initialization
250
+ if "dtype" in unused_kwargs:
251
+ init_dict["dtype"] = unused_kwargs.pop("dtype")
252
+
253
+ # add possible deprecated kwargs
254
+ for deprecated_kwarg in cls._deprecated_kwargs:
255
+ if deprecated_kwarg in unused_kwargs:
256
+ init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)
257
+
258
+ # Return model and optionally state and/or unused_kwargs
259
+ model = cls(**init_dict)
260
+
261
+ # make sure to also save config parameters that might be used for compatible classes
262
+ model.register_to_config(**hidden_dict)
263
+
264
+ # add hidden kwargs of compatible classes to unused_kwargs
265
+ unused_kwargs = {**unused_kwargs, **hidden_dict}
266
+
267
+ if return_unused_kwargs:
268
+ return (model, unused_kwargs)
269
+ else:
270
+ return model
271
+
272
+ @classmethod
273
+ def get_config_dict(cls, *args, **kwargs):
274
+ deprecation_message = (
275
+ f" The function get_config_dict is deprecated. Please use {cls}.load_config instead. This function will be"
276
+ " removed in version v1.0.0"
277
+ )
278
+ deprecate("get_config_dict", "1.0.0", deprecation_message, standard_warn=False)
279
+ return cls.load_config(*args, **kwargs)
280
+
281
+ @classmethod
282
+ @validate_hf_hub_args
283
+ def load_config(
284
+ cls,
285
+ pretrained_model_name_or_path: Union[str, os.PathLike],
286
+ return_unused_kwargs=False,
287
+ return_commit_hash=False,
288
+ **kwargs,
289
+ ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
290
+ r"""
291
+ Load a model or scheduler configuration.
292
+
293
+ Parameters:
294
+ pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
295
+ Can be either:
296
+
297
+ - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
298
+ the Hub.
299
+ - A path to a *directory* (for example `./my_model_directory`) containing model weights saved with
300
+ [`~ConfigMixin.save_config`].
301
+
302
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
303
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
304
+ is not used.
305
+ force_download (`bool`, *optional*, defaults to `False`):
306
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
307
+ cached versions if they exist.
308
+ resume_download (`bool`, *optional*, defaults to `False`):
309
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
310
+ incompletely downloaded files are deleted.
311
+ proxies (`Dict[str, str]`, *optional*):
312
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
313
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
314
+ output_loading_info(`bool`, *optional*, defaults to `False`):
315
+ Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
316
+ local_files_only (`bool`, *optional*, defaults to `False`):
317
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
318
+ won't be downloaded from the Hub.
319
+ token (`str` or *bool*, *optional*):
320
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
321
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
322
+ revision (`str`, *optional*, defaults to `"main"`):
323
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
324
+ allowed by Git.
325
+ subfolder (`str`, *optional*, defaults to `""`):
326
+ The subfolder location of a model file within a larger model repository on the Hub or locally.
327
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
328
+ Whether unused keyword arguments of the config are returned.
329
+ return_commit_hash (`bool`, *optional*, defaults to `False`):
330
+ Whether the `commit_hash` of the loaded configuration is returned.
331
+
332
+ Returns:
333
+ `dict`:
334
+ A dictionary of all the parameters stored in a JSON configuration file.
335
+
336
+ """
337
+ cache_dir = kwargs.pop("cache_dir", None)
338
+ force_download = kwargs.pop("force_download", False)
339
+ resume_download = kwargs.pop("resume_download", False)
340
+ proxies = kwargs.pop("proxies", None)
341
+ token = kwargs.pop("token", None)
342
+ local_files_only = kwargs.pop("local_files_only", False)
343
+ revision = kwargs.pop("revision", None)
344
+ _ = kwargs.pop("mirror", None)
345
+ subfolder = kwargs.pop("subfolder", None)
346
+ user_agent = kwargs.pop("user_agent", {})
347
+
348
+ user_agent = {**user_agent, "file_type": "config"}
349
+ user_agent = http_user_agent(user_agent)
350
+
351
+ pretrained_model_name_or_path = str(pretrained_model_name_or_path)
352
+
353
+ if cls.config_name is None:
354
+ raise ValueError(
355
+ "`self.config_name` is not defined. Note that one should not load a config from "
356
+ "`ConfigMixin`. Please make sure to define `config_name` in a class inheriting from `ConfigMixin`"
357
+ )
358
+
359
+ if os.path.isfile(pretrained_model_name_or_path):
360
+ config_file = pretrained_model_name_or_path
361
+ elif os.path.isdir(pretrained_model_name_or_path):
362
+ if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):
363
+ # Load from a PyTorch checkpoint
364
+ config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)
365
+ elif subfolder is not None and os.path.isfile(
366
+ os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)
367
+ ):
368
+ config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)
369
+ else:
370
+ raise EnvironmentError(
371
+ f"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}."
372
+ )
373
+ else:
374
+ try:
375
+ # Load from URL or cache if already cached
376
+ config_file = hf_hub_download(
377
+ pretrained_model_name_or_path,
378
+ filename=cls.config_name,
379
+ cache_dir=cache_dir,
380
+ force_download=force_download,
381
+ proxies=proxies,
382
+ resume_download=resume_download,
383
+ local_files_only=local_files_only,
384
+ token=token,
385
+ user_agent=user_agent,
386
+ subfolder=subfolder,
387
+ revision=revision,
388
+ )
389
+ except RepositoryNotFoundError:
390
+ raise EnvironmentError(
391
+ f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier"
392
+ " listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a"
393
+ " token having permission to this repo with `token` or log in with `huggingface-cli login`."
394
+ )
395
+ except RevisionNotFoundError:
396
+ raise EnvironmentError(
397
+ f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for"
398
+ " this model name. Check the model page at"
399
+ f" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
400
+ )
401
+ except EntryNotFoundError:
402
+ raise EnvironmentError(
403
+ f"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}."
404
+ )
405
+ except HTTPError as err:
406
+ raise EnvironmentError(
407
+ "There was a specific connection error when trying to load"
408
+ f" {pretrained_model_name_or_path}:\n{err}"
409
+ )
410
+ except ValueError:
411
+ raise EnvironmentError(
412
+ f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
413
+ f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
414
+ f" directory containing a {cls.config_name} file.\nCheck your internet connection or see how to"
415
+ " run the library in offline mode at"
416
+ " 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
417
+ )
418
+ except EnvironmentError:
419
+ raise EnvironmentError(
420
+ f"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from "
421
+ "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
422
+ f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
423
+ f"containing a {cls.config_name} file"
424
+ )
425
+
426
+ try:
427
+ # Load config dict
428
+ config_dict = cls._dict_from_json_file(config_file)
429
+
430
+ commit_hash = extract_commit_hash(config_file)
431
+ except (json.JSONDecodeError, UnicodeDecodeError):
432
+ raise EnvironmentError(f"It looks like the config file at '{config_file}' is not a valid JSON file.")
433
+
434
+ if not (return_unused_kwargs or return_commit_hash):
435
+ return config_dict
436
+
437
+ outputs = (config_dict,)
438
+
439
+ if return_unused_kwargs:
440
+ outputs += (kwargs,)
441
+
442
+ if return_commit_hash:
443
+ outputs += (commit_hash,)
444
+
445
+ return outputs
446
+
447
+ @staticmethod
448
+ def _get_init_keys(cls):
449
+ return set(dict(inspect.signature(cls.__init__).parameters).keys())
450
+
451
+ @classmethod
452
+ def extract_init_dict(cls, config_dict, **kwargs):
453
+ # Skip keys that were not present in the original config, so default __init__ values were used
454
+ used_defaults = config_dict.get("_use_default_values", [])
455
+ config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != "_use_default_values"}
456
+
457
+ # 0. Copy origin config dict
458
+ original_dict = dict(config_dict.items())
459
+
460
+ # 1. Retrieve expected config attributes from __init__ signature
461
+ expected_keys = cls._get_init_keys(cls)
462
+ expected_keys.remove("self")
463
+ # remove general kwargs if present in dict
464
+ if "kwargs" in expected_keys:
465
+ expected_keys.remove("kwargs")
466
+ # remove flax internal keys
467
+ if hasattr(cls, "_flax_internal_args"):
468
+ for arg in cls._flax_internal_args:
469
+ expected_keys.remove(arg)
470
+
471
+ # 2. Remove attributes that cannot be expected from expected config attributes
472
+ # remove keys to be ignored
473
+ if len(cls.ignore_for_config) > 0:
474
+ expected_keys = expected_keys - set(cls.ignore_for_config)
475
+
476
+ # load diffusers library to import compatible and original scheduler
477
+ diffusers_library = importlib.import_module(__name__.split(".")[0])
478
+
479
+ if cls.has_compatibles:
480
+ compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]
481
+ else:
482
+ compatible_classes = []
483
+
484
+ expected_keys_comp_cls = set()
485
+ for c in compatible_classes:
486
+ expected_keys_c = cls._get_init_keys(c)
487
+ expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)
488
+ expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)
489
+ config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}
490
+
491
+ # remove attributes from orig class that cannot be expected
492
+ orig_cls_name = config_dict.pop("_class_name", cls.__name__)
493
+ if (
494
+ isinstance(orig_cls_name, str)
495
+ and orig_cls_name != cls.__name__
496
+ and hasattr(diffusers_library, orig_cls_name)
497
+ ):
498
+ orig_cls = getattr(diffusers_library, orig_cls_name)
499
+ unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys
500
+ config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}
501
+ elif not isinstance(orig_cls_name, str) and not isinstance(orig_cls_name, (list, tuple)):
502
+ raise ValueError(
503
+ "Make sure that the `_class_name` is of type string or list of string (for custom pipelines)."
504
+ )
505
+
506
+ # remove private attributes
507
+ config_dict = {k: v for k, v in config_dict.items() if not k.startswith("_")}
508
+
509
+ # 3. Create keyword arguments that will be passed to __init__ from expected keyword arguments
510
+ init_dict = {}
511
+ for key in expected_keys:
512
+ # if config param is passed to kwarg and is present in config dict
513
+ # it should overwrite existing config dict key
514
+ if key in kwargs and key in config_dict:
515
+ config_dict[key] = kwargs.pop(key)
516
+
517
+ if key in kwargs:
518
+ # overwrite key
519
+ init_dict[key] = kwargs.pop(key)
520
+ elif key in config_dict:
521
+ # use value from config dict
522
+ init_dict[key] = config_dict.pop(key)
523
+
524
+ # 4. Give nice warning if unexpected values have been passed
525
+ if len(config_dict) > 0:
526
+ logger.warning(
527
+ f"The config attributes {config_dict} were passed to {cls.__name__}, "
528
+ "but are not expected and will be ignored. Please verify your "
529
+ f"{cls.config_name} configuration file."
530
+ )
531
+
532
+ # 5. Give nice info if config attributes are initialized to default because they have not been passed
533
+ passed_keys = set(init_dict.keys())
534
+ if len(expected_keys - passed_keys) > 0:
535
+ logger.info(
536
+ f"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values."
537
+ )
538
+
539
+ # 6. Define unused keyword arguments
540
+ unused_kwargs = {**config_dict, **kwargs}
541
+
542
+ # 7. Define "hidden" config parameters that were saved for compatible classes
543
+ hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}
544
+
545
+ return init_dict, unused_kwargs, hidden_config_dict
546
+
547
+ @classmethod
548
+ def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
549
+ with open(json_file, "r", encoding="utf-8") as reader:
550
+ text = reader.read()
551
+ return json.loads(text)
552
+
553
+ def __repr__(self):
554
+ return f"{self.__class__.__name__} {self.to_json_string()}"
555
+
556
+ @property
557
+ def config(self) -> Dict[str, Any]:
558
+ """
559
+ Returns the config of the class as a frozen dictionary
560
+
561
+ Returns:
562
+ `Dict[str, Any]`: Config of the class.
563
+ """
564
+ return self._internal_dict
565
+
566
+ def to_json_string(self) -> str:
567
+ """
568
+ Serializes the configuration instance to a JSON string.
569
+
570
+ Returns:
571
+ `str`:
572
+ String containing all the attributes that make up the configuration instance in JSON format.
573
+ """
574
+ config_dict = self._internal_dict if hasattr(self, "_internal_dict") else {}
575
+ config_dict["_class_name"] = self.__class__.__name__
576
+ config_dict["_diffusers_version"] = __version__
577
+
578
+ def to_json_saveable(value):
579
+ if isinstance(value, np.ndarray):
580
+ value = value.tolist()
581
+ elif isinstance(value, PosixPath):
582
+ value = str(value)
583
+ return value
584
+
585
+ config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}
586
+ # Don't save "_ignore_files" or "_use_default_values"
587
+ config_dict.pop("_ignore_files", None)
588
+ config_dict.pop("_use_default_values", None)
589
+
590
+ return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
591
+
592
+ def to_json_file(self, json_file_path: Union[str, os.PathLike]):
593
+ """
594
+ Save the configuration instance's parameters to a JSON file.
595
+
596
+ Args:
597
+ json_file_path (`str` or `os.PathLike`):
598
+ Path to the JSON file to save a configuration instance's parameters.
599
+ """
600
+ with open(json_file_path, "w", encoding="utf-8") as writer:
601
+ writer.write(self.to_json_string())
602
+
603
+
604
+ def register_to_config(init):
605
+ r"""
606
+ Decorator to apply on the init of classes inheriting from [`ConfigMixin`] so that all the arguments are
607
+ automatically sent to `self.register_to_config`. To ignore a specific argument accepted by the init but that
608
+ shouldn't be registered in the config, use the `ignore_for_config` class variable
609
+
610
+ Warning: Once decorated, all private arguments (beginning with an underscore) are trashed and not sent to the init!
611
+ """
612
+
613
+ @functools.wraps(init)
614
+ def inner_init(self, *args, **kwargs):
615
+ # Ignore private kwargs in the init.
616
+ init_kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")}
617
+ config_init_kwargs = {k: v for k, v in kwargs.items() if k.startswith("_")}
618
+ if not isinstance(self, ConfigMixin):
619
+ raise RuntimeError(
620
+ f"`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does "
621
+ "not inherit from `ConfigMixin`."
622
+ )
623
+
624
+ ignore = getattr(self, "ignore_for_config", [])
625
+ # Get positional arguments aligned with kwargs
626
+ new_kwargs = {}
627
+ signature = inspect.signature(init)
628
+ parameters = {
629
+ name: p.default for i, (name, p) in enumerate(signature.parameters.items()) if i > 0 and name not in ignore
630
+ }
631
+ for arg, name in zip(args, parameters.keys()):
632
+ new_kwargs[name] = arg
633
+
634
+ # Then add all kwargs
635
+ new_kwargs.update(
636
+ {
637
+ k: init_kwargs.get(k, default)
638
+ for k, default in parameters.items()
639
+ if k not in ignore and k not in new_kwargs
640
+ }
641
+ )
642
+
643
+ # Take note of the parameters that were not present in the loaded config
644
+ if len(set(new_kwargs.keys()) - set(init_kwargs)) > 0:
645
+ new_kwargs["_use_default_values"] = list(set(new_kwargs.keys()) - set(init_kwargs))
646
+
647
+ new_kwargs = {**config_init_kwargs, **new_kwargs}
648
+ getattr(self, "register_to_config")(**new_kwargs)
649
+ init(self, *args, **init_kwargs)
650
+
651
+ return inner_init
652
+
653
+
654
+ def flax_register_to_config(cls):
655
+ original_init = cls.__init__
656
+
657
+ @functools.wraps(original_init)
658
+ def init(self, *args, **kwargs):
659
+ if not isinstance(self, ConfigMixin):
660
+ raise RuntimeError(
661
+ f"`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does "
662
+ "not inherit from `ConfigMixin`."
663
+ )
664
+
665
+ # Ignore private kwargs in the init. Retrieve all passed attributes
666
+ init_kwargs = dict(kwargs.items())
667
+
668
+ # Retrieve default values
669
+ fields = dataclasses.fields(self)
670
+ default_kwargs = {}
671
+ for field in fields:
672
+ # ignore flax specific attributes
673
+ if field.name in self._flax_internal_args:
674
+ continue
675
+ if type(field.default) == dataclasses._MISSING_TYPE:
676
+ default_kwargs[field.name] = None
677
+ else:
678
+ default_kwargs[field.name] = getattr(self, field.name)
679
+
680
+ # Make sure init_kwargs override default kwargs
681
+ new_kwargs = {**default_kwargs, **init_kwargs}
682
+ # dtype should be part of `init_kwargs`, but not `new_kwargs`
683
+ if "dtype" in new_kwargs:
684
+ new_kwargs.pop("dtype")
685
+
686
+ # Get positional arguments aligned with kwargs
687
+ for i, arg in enumerate(args):
688
+ name = fields[i].name
689
+ new_kwargs[name] = arg
690
+
691
+ # Take note of the parameters that were not present in the loaded config
692
+ if len(set(new_kwargs.keys()) - set(init_kwargs)) > 0:
693
+ new_kwargs["_use_default_values"] = list(set(new_kwargs.keys()) - set(init_kwargs))
694
+
695
+ getattr(self, "register_to_config")(**new_kwargs)
696
+ original_init(self, *args, **kwargs)
697
+
698
+ cls.__init__ = init
699
+ return cls
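Between the decorators above and the `load_config`/`from_config` methods earlier in the file, the config round trip looks roughly like the sketch below. It is illustrative only and assumes network access plus the usual layout of the `google/ddpm-celebahq-256` repo (scheduler config under a `scheduler/` subfolder), the same model id the docstring above uses.

```python
from diffusers import DDPMScheduler

# load_config fetches/reads the JSON config as a plain dict ...
config = DDPMScheduler.load_config("google/ddpm-celebahq-256", subfolder="scheduler")

# ... and from_config instantiates the class from that dict; arguments captured
# by @register_to_config end up on the frozen `scheduler.config`.
scheduler = DDPMScheduler.from_config(config)
print(scheduler.config.num_train_timesteps)
```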
diffusers/dependency_versions_check.py ADDED
@@ -0,0 +1,34 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .dependency_versions_table import deps
16
+ from .utils.versions import require_version, require_version_core
17
+
18
+
19
+ # define which module versions we always want to check at run time
20
+ # (usually the ones defined in `install_requires` in setup.py)
21
+ #
22
+ # order specific notes:
23
+ # - tqdm must be checked before tokenizers
24
+
25
+ pkgs_to_check_at_runtime = "python requests filelock numpy".split()
26
+ for pkg in pkgs_to_check_at_runtime:
27
+ if pkg in deps:
28
+ require_version_core(deps[pkg])
29
+ else:
30
+ raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
31
+
32
+
33
+ def dep_version_check(pkg, hint=None):
34
+ require_version(deps[pkg], hint)
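A small illustrative sketch of how the helper above is meant to be called from other modules; the `hint` string is made up for the example, and `dep_version_check` simply forwards the pin recorded in `dependency_versions_table.py` to `require_version`.

```python
from diffusers.dependency_versions_check import dep_version_check

# Looks up the pin for "torch" in the deps table ("torch>=1.4") and raises
# with the hint appended if the installed version does not satisfy it.
dep_version_check("torch", hint="Try `pip install --upgrade torch`.")
```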
diffusers/dependency_versions_table.py ADDED
@@ -0,0 +1,45 @@
1
+ # THIS FILE HAS BEEN AUTOGENERATED. To update:
2
+ # 1. modify the `_deps` dict in setup.py
3
+ # 2. run `make deps_table_update`
4
+ deps = {
5
+ "Pillow": "Pillow",
6
+ "accelerate": "accelerate>=0.11.0",
7
+ "compel": "compel==0.1.8",
8
+ "datasets": "datasets",
9
+ "filelock": "filelock",
10
+ "flax": "flax>=0.4.1",
11
+ "hf-doc-builder": "hf-doc-builder>=0.3.0",
12
+ "huggingface-hub": "huggingface-hub>=0.20.2",
13
+ "requests-mock": "requests-mock==1.10.0",
14
+ "importlib_metadata": "importlib_metadata",
15
+ "invisible-watermark": "invisible-watermark>=0.2.0",
16
+ "isort": "isort>=5.5.4",
17
+ "jax": "jax>=0.4.1",
18
+ "jaxlib": "jaxlib>=0.4.1",
19
+ "Jinja2": "Jinja2",
20
+ "k-diffusion": "k-diffusion>=0.0.12",
21
+ "torchsde": "torchsde",
22
+ "note_seq": "note_seq",
23
+ "librosa": "librosa",
24
+ "numpy": "numpy",
25
+ "parameterized": "parameterized",
26
+ "peft": "peft>=0.6.0",
27
+ "protobuf": "protobuf>=3.20.3,<4",
28
+ "pytest": "pytest",
29
+ "pytest-timeout": "pytest-timeout",
30
+ "pytest-xdist": "pytest-xdist",
31
+ "python": "python>=3.8.0",
32
+ "ruff": "ruff==0.1.5",
33
+ "safetensors": "safetensors>=0.3.1",
34
+ "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
35
+ "GitPython": "GitPython<3.1.19",
36
+ "scipy": "scipy",
37
+ "onnx": "onnx",
38
+ "regex": "regex!=2019.12.17",
39
+ "requests": "requests",
40
+ "tensorboard": "tensorboard",
41
+ "torch": "torch>=1.4",
42
+ "torchvision": "torchvision",
43
+ "transformers": "transformers>=4.25.1",
44
+ "urllib3": "urllib3<=2.0.0",
45
+ }
diffusers/experimental/README.md ADDED
@@ -0,0 +1,5 @@
1
+ # 🧨 Diffusers Experimental
2
+
3
+ We are adding experimental code to support novel applications and usages of the Diffusers library.
4
+ Currently, the following experiments are supported:
5
+ * Reinforcement learning via an implementation of the [Diffuser](https://arxiv.org/abs/2205.09991) model.
diffusers/experimental/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .rl import ValueGuidedRLPipeline
diffusers/experimental/rl/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .value_guided_sampling import ValueGuidedRLPipeline
diffusers/experimental/rl/value_guided_sampling.py ADDED
@@ -0,0 +1,153 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import numpy as np
16
+ import torch
17
+ import tqdm
18
+
19
+ from ...models.unets.unet_1d import UNet1DModel
20
+ from ...pipelines import DiffusionPipeline
21
+ from ...utils.dummy_pt_objects import DDPMScheduler
22
+ from ...utils.torch_utils import randn_tensor
23
+
24
+
25
+ class ValueGuidedRLPipeline(DiffusionPipeline):
26
+ r"""
27
+ Pipeline for value-guided sampling from a diffusion model trained to predict sequences of states.
28
+
29
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
30
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
31
+
32
+ Parameters:
33
+ value_function ([`UNet1DModel`]):
34
+ A specialized UNet for fine-tuning trajectories based on reward.
35
+ unet ([`UNet1DModel`]):
36
+ UNet architecture to denoise the encoded trajectories.
37
+ scheduler ([`SchedulerMixin`]):
38
+ A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this
39
+ application is [`DDPMScheduler`].
40
+ env ():
41
+ An environment following the OpenAI gym API to act in. For now only Hopper has pretrained models.
42
+ """
43
+
44
+ def __init__(
45
+ self,
46
+ value_function: UNet1DModel,
47
+ unet: UNet1DModel,
48
+ scheduler: DDPMScheduler,
49
+ env,
50
+ ):
51
+ super().__init__()
52
+
53
+ self.register_modules(value_function=value_function, unet=unet, scheduler=scheduler, env=env)
54
+
55
+ self.data = env.get_dataset()
56
+ self.means = {}
57
+ for key in self.data.keys():
58
+ try:
59
+ self.means[key] = self.data[key].mean()
60
+ except: # noqa: E722
61
+ pass
62
+ self.stds = {}
63
+ for key in self.data.keys():
64
+ try:
65
+ self.stds[key] = self.data[key].std()
66
+ except: # noqa: E722
67
+ pass
68
+ self.state_dim = env.observation_space.shape[0]
69
+ self.action_dim = env.action_space.shape[0]
70
+
71
+ def normalize(self, x_in, key):
72
+ return (x_in - self.means[key]) / self.stds[key]
73
+
74
+ def de_normalize(self, x_in, key):
75
+ return x_in * self.stds[key] + self.means[key]
76
+
77
+ def to_torch(self, x_in):
78
+ if isinstance(x_in, dict):
79
+ return {k: self.to_torch(v) for k, v in x_in.items()}
80
+ elif torch.is_tensor(x_in):
81
+ return x_in.to(self.unet.device)
82
+ return torch.tensor(x_in, device=self.unet.device)
83
+
84
+ def reset_x0(self, x_in, cond, act_dim):
85
+ for key, val in cond.items():
86
+ x_in[:, key, act_dim:] = val.clone()
87
+ return x_in
88
+
89
+ def run_diffusion(self, x, conditions, n_guide_steps, scale):
90
+ batch_size = x.shape[0]
91
+ y = None
92
+ for i in tqdm.tqdm(self.scheduler.timesteps):
93
+ # create batch of timesteps to pass into model
94
+ timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
95
+ for _ in range(n_guide_steps):
96
+ with torch.enable_grad():
97
+ x.requires_grad_()
98
+
99
+ # permute to match dimension for pre-trained models
100
+ y = self.value_function(x.permute(0, 2, 1), timesteps).sample
101
+ grad = torch.autograd.grad([y.sum()], [x])[0]
102
+
103
+ posterior_variance = self.scheduler._get_variance(i)
104
+ model_std = torch.exp(0.5 * posterior_variance)
105
+ grad = model_std * grad
106
+
107
+ grad[timesteps < 2] = 0
108
+ x = x.detach()
109
+ x = x + scale * grad
110
+ x = self.reset_x0(x, conditions, self.action_dim)
111
+
112
+ prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
113
+
114
+ # TODO: verify deprecation of this kwarg
115
+ x = self.scheduler.step(prev_x, i, x)["prev_sample"]
116
+
117
+ # apply conditions to the trajectory (set the initial state)
118
+ x = self.reset_x0(x, conditions, self.action_dim)
119
+ x = self.to_torch(x)
120
+ return x, y
121
+
122
+ def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
123
+ # normalize the observations and create batch dimension
124
+ obs = self.normalize(obs, "observations")
125
+ obs = obs[None].repeat(batch_size, axis=0)
126
+
127
+ conditions = {0: self.to_torch(obs)}
128
+ shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
129
+
130
+ # generate initial noise and apply our conditions (to make the trajectories start at current state)
131
+ x1 = randn_tensor(shape, device=self.unet.device)
132
+ x = self.reset_x0(x1, conditions, self.action_dim)
133
+ x = self.to_torch(x)
134
+
135
+ # run the diffusion process
136
+ x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
137
+
138
+ # sort output trajectories by value
139
+ sorted_idx = y.argsort(0, descending=True).squeeze()
140
+ sorted_values = x[sorted_idx]
141
+ actions = sorted_values[:, :, : self.action_dim]
142
+ actions = actions.detach().cpu().numpy()
143
+ denorm_actions = self.de_normalize(actions, key="actions")
144
+
145
+ # select the action with the highest value
146
+ if y is not None:
147
+ selected_index = 0
148
+ else:
149
+ # if we didn't run value guiding, select a random action
150
+ selected_index = np.random.randint(0, batch_size)
151
+
152
+ denorm_actions = denorm_actions[selected_index, 0]
153
+ return denorm_actions
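A rough usage sketch for this pipeline, kept outside the file itself: it assumes a D4RL-style Hopper environment (classic `gym` API) and the `bglick13/hopper-medium-v2-value-function-hor32` Hub repo used by the diffusers RL examples; treat both as assumptions of the sketch rather than guarantees of this commit.

```python
import d4rl  # noqa: F401  (registers the hopper-medium-v2 environment)
import gym

from diffusers.experimental import ValueGuidedRLPipeline

env = gym.make("hopper-medium-v2")
pipeline = ValueGuidedRLPipeline.from_pretrained(
    "bglick13/hopper-medium-v2-value-function-hor32", env=env
)

obs = env.reset()
for _ in range(10):
    # plan 32 steps ahead, then execute the highest-value action for the current state
    action = pipeline(obs, planning_horizon=32)
    obs, reward, done, _ = env.step(action)
    if done:
        break
```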
diffusers/image_processor.py ADDED
@@ -0,0 +1,884 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+ from typing import List, Optional, Tuple, Union
17
+
18
+ import numpy as np
19
+ import PIL.Image
20
+ import torch
21
+ from PIL import Image, ImageFilter, ImageOps
22
+
23
+ from .configuration_utils import ConfigMixin, register_to_config
24
+ from .utils import CONFIG_NAME, PIL_INTERPOLATION, deprecate
25
+
26
+
27
+ PipelineImageInput = Union[
28
+ PIL.Image.Image,
29
+ np.ndarray,
30
+ torch.FloatTensor,
31
+ List[PIL.Image.Image],
32
+ List[np.ndarray],
33
+ List[torch.FloatTensor],
34
+ ]
35
+
36
+ PipelineDepthInput = PipelineImageInput
37
+
38
+
39
+ class VaeImageProcessor(ConfigMixin):
40
+ """
41
+ Image processor for VAE.
42
+
43
+ Args:
44
+ do_resize (`bool`, *optional*, defaults to `True`):
45
+ Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`. Can accept
46
+ `height` and `width` arguments from [`image_processor.VaeImageProcessor.preprocess`] method.
47
+ vae_scale_factor (`int`, *optional*, defaults to `8`):
48
+ VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor.
49
+ resample (`str`, *optional*, defaults to `lanczos`):
50
+ Resampling filter to use when resizing the image.
51
+ do_normalize (`bool`, *optional*, defaults to `True`):
52
+ Whether to normalize the image to [-1,1].
53
+ do_binarize (`bool`, *optional*, defaults to `False`):
54
+ Whether to binarize the image to 0/1.
55
+ do_convert_rgb (`bool`, *optional*, defaults to `False`):
56
+ Whether to convert the images to RGB format.
57
+ do_convert_grayscale (`bool`, *optional*, defaults to `False`):
58
+ Whether to convert the images to grayscale format.
59
+ """
60
+
61
+ config_name = CONFIG_NAME
62
+
63
+ @register_to_config
64
+ def __init__(
65
+ self,
66
+ do_resize: bool = True,
67
+ vae_scale_factor: int = 8,
68
+ resample: str = "lanczos",
69
+ do_normalize: bool = True,
70
+ do_binarize: bool = False,
71
+ do_convert_rgb: bool = False,
72
+ do_convert_grayscale: bool = False,
73
+ ):
74
+ super().__init__()
75
+ if do_convert_rgb and do_convert_grayscale:
76
+ raise ValueError(
77
+ "`do_convert_rgb` and `do_convert_grayscale` can not both be set to `True`,"
78
+ " if you intended to convert the image into RGB format, please set `do_convert_grayscale = False`.",
79
+ " if you intended to convert the image into grayscale format, please set `do_convert_rgb = False`",
80
+ )
81
+ self.config.do_convert_rgb = False
82
+
83
+ @staticmethod
84
+ def numpy_to_pil(images: np.ndarray) -> List[PIL.Image.Image]:
85
+ """
86
+ Convert a numpy image or a batch of images to a PIL image.
87
+ """
88
+ if images.ndim == 3:
89
+ images = images[None, ...]
90
+ images = (images * 255).round().astype("uint8")
91
+ if images.shape[-1] == 1:
92
+ # special case for grayscale (single channel) images
93
+ pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
94
+ else:
95
+ pil_images = [Image.fromarray(image) for image in images]
96
+
97
+ return pil_images
98
+
99
+ @staticmethod
100
+ def pil_to_numpy(images: Union[List[PIL.Image.Image], PIL.Image.Image]) -> np.ndarray:
101
+ """
102
+ Convert a PIL image or a list of PIL images to NumPy arrays.
103
+ """
104
+ if not isinstance(images, list):
105
+ images = [images]
106
+ images = [np.array(image).astype(np.float32) / 255.0 for image in images]
107
+ images = np.stack(images, axis=0)
108
+
109
+ return images
110
+
111
+ @staticmethod
112
+ def numpy_to_pt(images: np.ndarray) -> torch.FloatTensor:
113
+ """
114
+ Convert a NumPy image to a PyTorch tensor.
115
+ """
116
+ if images.ndim == 3:
117
+ images = images[..., None]
118
+
119
+ images = torch.from_numpy(images.transpose(0, 3, 1, 2))
120
+ return images
121
+
122
+ @staticmethod
123
+ def pt_to_numpy(images: torch.FloatTensor) -> np.ndarray:
124
+ """
125
+ Convert a PyTorch tensor to a NumPy image.
126
+ """
127
+ images = images.cpu().permute(0, 2, 3, 1).float().numpy()
128
+ return images
129
+
130
+ @staticmethod
131
+ def normalize(images: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
132
+ """
133
+ Normalize an image array to [-1,1].
134
+ """
135
+ return 2.0 * images - 1.0
136
+
137
+ @staticmethod
138
+ def denormalize(images: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
139
+ """
140
+ Denormalize an image array to [0,1].
141
+ """
142
+ return (images / 2 + 0.5).clamp(0, 1)
143
+
144
+ @staticmethod
145
+ def convert_to_rgb(image: PIL.Image.Image) -> PIL.Image.Image:
146
+ """
147
+ Converts a PIL image to RGB format.
148
+ """
149
+ image = image.convert("RGB")
150
+
151
+ return image
152
+
153
+ @staticmethod
154
+ def convert_to_grayscale(image: PIL.Image.Image) -> PIL.Image.Image:
155
+ """
156
+ Converts a PIL image to grayscale format.
157
+ """
158
+ image = image.convert("L")
159
+
160
+ return image
161
+
162
+ @staticmethod
163
+ def blur(image: PIL.Image.Image, blur_factor: int = 4) -> PIL.Image.Image:
164
+ """
165
+ Applies Gaussian blur to an image.
166
+ """
167
+ image = image.filter(ImageFilter.GaussianBlur(blur_factor))
168
+
169
+ return image
170
+
171
+ @staticmethod
172
+ def get_crop_region(mask_image: PIL.Image.Image, width: int, height: int, pad=0):
173
+ """
174
+ Finds a rectangular region that contains all masked areas in an image, and expands the region to match the aspect ratio of the image to be processed;
175
+ for example, if user drew mask in a 128x32 region, and the dimensions for processing are 512x512, the region will be expanded to 128x128.
176
+
177
+ Args:
178
+ mask_image (PIL.Image.Image): Mask image.
179
+ width (int): Width of the image to be processed.
180
+ height (int): Height of the image to be processed.
181
+ pad (int, optional): Padding to be added to the crop region. Defaults to 0.
182
+
183
+ Returns:
184
+ tuple: (x1, y1, x2, y2) representing a rectangular region that contains all masked areas in an image and matches the processing aspect ratio.
185
+ """
186
+
187
+ mask_image = mask_image.convert("L")
188
+ mask = np.array(mask_image)
189
+
190
+ # 1. find a rectangular region that contains all masked areas in an image
191
+ h, w = mask.shape
192
+ crop_left = 0
193
+ for i in range(w):
194
+ if not (mask[:, i] == 0).all():
195
+ break
196
+ crop_left += 1
197
+
198
+ crop_right = 0
199
+ for i in reversed(range(w)):
200
+ if not (mask[:, i] == 0).all():
201
+ break
202
+ crop_right += 1
203
+
204
+ crop_top = 0
205
+ for i in range(h):
206
+ if not (mask[i] == 0).all():
207
+ break
208
+ crop_top += 1
209
+
210
+ crop_bottom = 0
211
+ for i in reversed(range(h)):
212
+ if not (mask[i] == 0).all():
213
+ break
214
+ crop_bottom += 1
215
+
216
+ # 2. add padding to the crop region
217
+ x1, y1, x2, y2 = (
218
+ int(max(crop_left - pad, 0)),
219
+ int(max(crop_top - pad, 0)),
220
+ int(min(w - crop_right + pad, w)),
221
+ int(min(h - crop_bottom + pad, h)),
222
+ )
223
+
224
+ # 3. expands crop region to match the aspect ratio of the image to be processed
225
+ ratio_crop_region = (x2 - x1) / (y2 - y1)
226
+ ratio_processing = width / height
227
+
228
+ if ratio_crop_region > ratio_processing:
229
+ desired_height = (x2 - x1) / ratio_processing
230
+ desired_height_diff = int(desired_height - (y2 - y1))
231
+ y1 -= desired_height_diff // 2
232
+ y2 += desired_height_diff - desired_height_diff // 2
233
+ if y2 >= mask_image.height:
234
+ diff = y2 - mask_image.height
235
+ y2 -= diff
236
+ y1 -= diff
237
+ if y1 < 0:
238
+ y2 -= y1
239
+ y1 -= y1
240
+ if y2 >= mask_image.height:
241
+ y2 = mask_image.height
242
+ else:
243
+ desired_width = (y2 - y1) * ratio_processing
244
+ desired_width_diff = int(desired_width - (x2 - x1))
245
+ x1 -= desired_width_diff // 2
246
+ x2 += desired_width_diff - desired_width_diff // 2
247
+ if x2 >= mask_image.width:
248
+ diff = x2 - mask_image.width
249
+ x2 -= diff
250
+ x1 -= diff
251
+ if x1 < 0:
252
+ x2 -= x1
253
+ x1 -= x1
254
+ if x2 >= mask_image.width:
255
+ x2 = mask_image.width
256
+
257
+ return x1, y1, x2, y2
258
+
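To make the cropping and expansion logic above concrete, here is a tiny self-contained check with a synthetic mask (a sketch, not part of the committed file): the painted 11x11 block is already square, so for 512x512 processing no expansion is needed.

```python
import numpy as np
from PIL import Image

from diffusers.image_processor import VaeImageProcessor

# 64x64 mask with a white rectangle covering rows 10..20 and columns 30..40
mask = np.zeros((64, 64), dtype=np.uint8)
mask[10:21, 30:41] = 255
mask_image = Image.fromarray(mask, mode="L")

# Bounding box of the masked area, expanded to the 512x512 aspect ratio (1:1 here),
# so it comes back unchanged: (30, 10, 41, 21)
print(VaeImageProcessor.get_crop_region(mask_image, width=512, height=512))
```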
259
+ def _resize_and_fill(
260
+ self,
261
+ image: PIL.Image.Image,
262
+ width: int,
263
+ height: int,
264
+ ) -> PIL.Image.Image:
265
+ """
266
+ Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, filling the empty space with data from the image.
267
+
268
+ Args:
269
+ image: The image to resize.
270
+ width: The width to resize the image to.
271
+ height: The height to resize the image to.
272
+ """
273
+
274
+ ratio = width / height
275
+ src_ratio = image.width / image.height
276
+
277
+ src_w = width if ratio < src_ratio else image.width * height // image.height
278
+ src_h = height if ratio >= src_ratio else image.height * width // image.width
279
+
280
+ resized = image.resize((src_w, src_h), resample=PIL_INTERPOLATION["lanczos"])
281
+ res = Image.new("RGB", (width, height))
282
+ res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
283
+
284
+ if ratio < src_ratio:
285
+ fill_height = height // 2 - src_h // 2
286
+ if fill_height > 0:
287
+ res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
288
+ res.paste(
289
+ resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)),
290
+ box=(0, fill_height + src_h),
291
+ )
292
+ elif ratio > src_ratio:
293
+ fill_width = width // 2 - src_w // 2
294
+ if fill_width > 0:
295
+ res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
296
+ res.paste(
297
+ resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)),
298
+ box=(fill_width + src_w, 0),
299
+ )
300
+
301
+ return res
302
+
303
+ def _resize_and_crop(
304
+ self,
305
+ image: PIL.Image.Image,
306
+ width: int,
307
+ height: int,
308
+ ) -> PIL.Image.Image:
309
+ """
310
+ Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess.
311
+
312
+ Args:
313
+ image: The image to resize.
314
+ width: The width to resize the image to.
315
+ height: The height to resize the image to.
316
+ """
317
+ ratio = width / height
318
+ src_ratio = image.width / image.height
319
+
320
+ src_w = width if ratio > src_ratio else image.width * height // image.height
321
+ src_h = height if ratio <= src_ratio else image.height * width // image.width
322
+
323
+ resized = image.resize((src_w, src_h), resample=PIL_INTERPOLATION["lanczos"])
324
+ res = Image.new("RGB", (width, height))
325
+ res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
326
+ return res
327
+
328
+ def resize(
329
+ self,
330
+ image: Union[PIL.Image.Image, np.ndarray, torch.Tensor],
331
+ height: int,
332
+ width: int,
333
+ resize_mode: str = "default", # "default", "fill", "crop"
334
+ ) -> Union[PIL.Image.Image, np.ndarray, torch.Tensor]:
335
+ """
336
+ Resize image.
337
+
338
+ Args:
339
+ image (`PIL.Image.Image`, `np.ndarray` or `torch.Tensor`):
340
+ The image input, can be a PIL image, numpy array or pytorch tensor.
341
+ height (`int`):
342
+ The height to resize to.
343
+ width (`int`):
344
+ The width to resize to.
345
+ resize_mode (`str`, *optional*, defaults to `default`):
346
+ The resize mode to use, can be one of `default`, `fill` or `crop`. If `default`, will resize the image to fit
347
+ within the specified width and height, and it may not maintain the original aspect ratio.
348
+ If `fill`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image
349
+ within the dimensions, filling the empty space with data from the image.
350
+ If `crop`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image
351
+ within the dimensions, cropping the excess.
352
+ Note that resize_mode `fill` and `crop` are only supported for PIL image input.
353
+
354
+ Returns:
355
+ `PIL.Image.Image`, `np.ndarray` or `torch.Tensor`:
356
+ The resized image.
357
+ """
358
+ if resize_mode != "default" and not isinstance(image, PIL.Image.Image):
359
+ raise ValueError(f"Only PIL image input is supported for resize_mode {resize_mode}")
360
+ if isinstance(image, PIL.Image.Image):
361
+ if resize_mode == "default":
362
+ image = image.resize((width, height), resample=PIL_INTERPOLATION[self.config.resample])
363
+ elif resize_mode == "fill":
364
+ image = self._resize_and_fill(image, width, height)
365
+ elif resize_mode == "crop":
366
+ image = self._resize_and_crop(image, width, height)
367
+ else:
368
+ raise ValueError(f"resize_mode {resize_mode} is not supported")
369
+
370
+ elif isinstance(image, torch.Tensor):
371
+ image = torch.nn.functional.interpolate(
372
+ image,
373
+ size=(height, width),
374
+ )
375
+ elif isinstance(image, np.ndarray):
376
+ image = self.numpy_to_pt(image)
377
+ image = torch.nn.functional.interpolate(
378
+ image,
379
+ size=(height, width),
380
+ )
381
+ image = self.pt_to_numpy(image)
382
+ return image
383
+
384
+ def binarize(self, image: PIL.Image.Image) -> PIL.Image.Image:
385
+ """
386
+ Create a mask.
387
+
388
+ Args:
389
+ image (`PIL.Image.Image`):
390
+ The image input, should be a PIL image.
391
+
392
+ Returns:
393
+ `PIL.Image.Image`:
394
+ The binarized image. Values less than 0.5 are set to 0, values greater than 0.5 are set to 1.
395
+ """
396
+ image[image < 0.5] = 0
397
+ image[image >= 0.5] = 1
398
+
399
+ return image
400
+
401
+ def get_default_height_width(
402
+ self,
403
+ image: Union[PIL.Image.Image, np.ndarray, torch.Tensor],
404
+ height: Optional[int] = None,
405
+ width: Optional[int] = None,
406
+ ) -> Tuple[int, int]:
407
+ """
408
+ This function returns the height and width that are downscaled to the next integer multiple of
409
+ `vae_scale_factor`.
410
+
411
+ Args:
412
+ image(`PIL.Image.Image`, `np.ndarray` or `torch.Tensor`):
413
+ The image input, can be a PIL image, numpy array or pytorch tensor. If it is a numpy array, it should have
414
+ shape `[batch, height, width]` or `[batch, height, width, channel]`; if it is a pytorch tensor, it should
415
+ have shape `[batch, channel, height, width]`.
416
+ height (`int`, *optional*, defaults to `None`):
417
+ The height of the preprocessed image. If `None`, will use the height of the `image` input.
418
+ width (`int`, *optional*, defaults to `None`):
419
+ The width of the preprocessed image. If `None`, will use the width of the `image` input.
420
+ """
421
+
422
+ if height is None:
423
+ if isinstance(image, PIL.Image.Image):
424
+ height = image.height
425
+ elif isinstance(image, torch.Tensor):
426
+ height = image.shape[2]
427
+ else:
428
+ height = image.shape[1]
429
+
430
+ if width is None:
431
+ if isinstance(image, PIL.Image.Image):
432
+ width = image.width
433
+ elif isinstance(image, torch.Tensor):
434
+ width = image.shape[3]
435
+ else:
436
+ width = image.shape[2]
437
+
438
+ width, height = (
439
+ x - x % self.config.vae_scale_factor for x in (width, height)
440
+ ) # resize to integer multiple of vae_scale_factor
441
+
442
+ return height, width
443
+
444
+ def preprocess(
445
+ self,
446
+ image: PipelineImageInput,
447
+ height: Optional[int] = None,
448
+ width: Optional[int] = None,
449
+ resize_mode: str = "default", # "default", "fill", "crop"
450
+ crops_coords: Optional[Tuple[int, int, int, int]] = None,
451
+ ) -> torch.Tensor:
452
+ """
453
+ Preprocess the image input.
454
+
455
+ Args:
456
+ image (`PipelineImageInput`):
457
+ The image input. Accepted formats are PIL images, NumPy arrays, PyTorch tensors, or lists of any of these supported formats.
458
+ height (`int`, *optional*, defaults to `None`):
459
+ The height of the preprocessed image. If `None`, will use `get_default_height_width()` to get the default height.
460
+ width (`int`, *optional*, defaults to `None`):
461
+ The width of the preprocessed image. If `None`, will use `get_default_height_width()` to get the default width.
462
+ resize_mode (`str`, *optional*, defaults to `default`):
463
+ The resize mode, can be one of `default`, `fill` or `crop`. If `default`, will resize the image to fit
464
+ within the specified width and height, and it may not maintain the original aspect ratio.
465
+ If `fill`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image
466
+ within the dimensions, filling the empty space with data from the image.
467
+ If `crop`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image
468
+ within the dimensions, cropping the excess.
469
+ Note that resize_mode `fill` and `crop` are only supported for PIL image input.
470
+ crops_coords (`List[Tuple[int, int, int, int]]`, *optional*, defaults to `None`):
471
+ The crop coordinates for each image in the batch. If `None`, will not crop the image.
472
+ """
473
+ supported_formats = (PIL.Image.Image, np.ndarray, torch.Tensor)
474
+
475
+ # Expand the missing dimension for 3-dimensional pytorch tensor or numpy array that represents grayscale image
476
+ if self.config.do_convert_grayscale and isinstance(image, (torch.Tensor, np.ndarray)) and image.ndim == 3:
477
+ if isinstance(image, torch.Tensor):
478
+ # if image is a pytorch tensor could have 2 possible shapes:
479
+ # 1. batch x height x width: we should insert the channel dimension at position 1
480
+ # 2. channel x height x width: we should insert batch dimension at position 0,
482
+ # however, since both channel and batch dimensions have size 1, it is the same to insert at position 1
482
+ # for simplicity, we insert a dimension of size 1 at position 1 for both cases
483
+ image = image.unsqueeze(1)
484
+ else:
485
+ # if it is a numpy array, it could have 2 possible shapes:
486
+ # 1. batch x height x width: insert channel dimension on last position
487
+ # 2. height x width x channel: insert batch dimension on first position
488
+ if image.shape[-1] == 1:
489
+ image = np.expand_dims(image, axis=0)
490
+ else:
491
+ image = np.expand_dims(image, axis=-1)
492
+
493
+ if isinstance(image, supported_formats):
494
+ image = [image]
495
+ elif not (isinstance(image, list) and all(isinstance(i, supported_formats) for i in image)):
496
+ raise ValueError(
497
+ f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support {', '.join(str(x) for x in supported_formats)}"
498
+ )
499
+
500
+ if isinstance(image[0], PIL.Image.Image):
501
+ if crops_coords is not None:
502
+ image = [i.crop(crops_coords) for i in image]
503
+ if self.config.do_resize:
504
+ height, width = self.get_default_height_width(image[0], height, width)
505
+ image = [self.resize(i, height, width, resize_mode=resize_mode) for i in image]
506
+ if self.config.do_convert_rgb:
507
+ image = [self.convert_to_rgb(i) for i in image]
508
+ elif self.config.do_convert_grayscale:
509
+ image = [self.convert_to_grayscale(i) for i in image]
510
+ image = self.pil_to_numpy(image) # to np
511
+ image = self.numpy_to_pt(image) # to pt
512
+
513
+ elif isinstance(image[0], np.ndarray):
514
+ image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0)
515
+
516
+ image = self.numpy_to_pt(image)
517
+
518
+ height, width = self.get_default_height_width(image, height, width)
519
+ if self.config.do_resize:
520
+ image = self.resize(image, height, width)
521
+
522
+ elif isinstance(image[0], torch.Tensor):
523
+ image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
524
+
525
+ if self.config.do_convert_grayscale and image.ndim == 3:
526
+ image = image.unsqueeze(1)
527
+
528
+ channel = image.shape[1]
529
+ # don't need any preprocess if the image is latents
530
+ if channel == 4:
531
+ return image
532
+
533
+ height, width = self.get_default_height_width(image, height, width)
534
+ if self.config.do_resize:
535
+ image = self.resize(image, height, width)
536
+
537
+ # expected range [0,1], normalize to [-1,1]
538
+ do_normalize = self.config.do_normalize
539
+ if do_normalize and image.min() < 0:
540
+ warnings.warn(
541
+ "Passing `image` as torch tensor with value range in [-1,1] is deprecated. The expected value range for image tensor is [0,1] "
542
+ f"when passing as pytorch tensor or numpy Array. You passed `image` with value range [{image.min()},{image.max()}]",
543
+ FutureWarning,
544
+ )
545
+ do_normalize = False
546
+
547
+ if do_normalize:
548
+ image = self.normalize(image)
549
+
550
+ if self.config.do_binarize:
551
+ image = self.binarize(image)
552
+
553
+ return image
554
+
555
+ def postprocess(
556
+ self,
557
+ image: torch.FloatTensor,
558
+ output_type: str = "pil",
559
+ do_denormalize: Optional[List[bool]] = None,
560
+ ) -> Union[PIL.Image.Image, np.ndarray, torch.FloatTensor]:
561
+ """
562
+ Postprocess the image output from tensor to `output_type`.
563
+
564
+ Args:
565
+ image (`torch.FloatTensor`):
566
+ The image input, should be a pytorch tensor with shape `B x C x H x W`.
567
+ output_type (`str`, *optional*, defaults to `pil`):
568
+ The output type of the image, can be one of `pil`, `np`, `pt`, `latent`.
569
+ do_denormalize (`List[bool]`, *optional*, defaults to `None`):
570
+ Whether to denormalize the image to [0,1]. If `None`, will use the value of `do_normalize` in the
571
+ `VaeImageProcessor` config.
572
+
573
+ Returns:
574
+ `PIL.Image.Image`, `np.ndarray` or `torch.FloatTensor`:
575
+ The postprocessed image.
576
+ """
577
+ if not isinstance(image, torch.Tensor):
578
+ raise ValueError(
579
+ f"Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor"
580
+ )
581
+ if output_type not in ["latent", "pt", "np", "pil"]:
582
+ deprecation_message = (
583
+ f"the output_type {output_type} is outdated and has been set to `np`. Please make sure to set it to one of these instead: "
584
+ "`pil`, `np`, `pt`, `latent`"
585
+ )
586
+ deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False)
587
+ output_type = "np"
588
+
589
+ if output_type == "latent":
590
+ return image
591
+
592
+ if do_denormalize is None:
593
+ do_denormalize = [self.config.do_normalize] * image.shape[0]
594
+
595
+ image = torch.stack(
596
+ [self.denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0])]
597
+ )
598
+
599
+ if output_type == "pt":
600
+ return image
601
+
602
+ image = self.pt_to_numpy(image)
603
+
604
+ if output_type == "np":
605
+ return image
606
+
607
+ if output_type == "pil":
608
+ return self.numpy_to_pil(image)
609
+
610
+ def apply_overlay(
611
+ self,
612
+ mask: PIL.Image.Image,
613
+ init_image: PIL.Image.Image,
614
+ image: PIL.Image.Image,
615
+ crop_coords: Optional[Tuple[int, int, int, int]] = None,
616
+ ) -> PIL.Image.Image:
617
+ """
618
+ Overlay the inpaint output on the original image.
619
+ """
620
+
621
+ width, height = image.width, image.height
622
+
623
+ init_image = self.resize(init_image, width=width, height=height)
624
+ mask = self.resize(mask, width=width, height=height)
625
+
626
+ init_image_masked = PIL.Image.new("RGBa", (width, height))
627
+ init_image_masked.paste(init_image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(mask.convert("L")))
628
+ init_image_masked = init_image_masked.convert("RGBA")
629
+
630
+ if crop_coords is not None:
631
+ x, y, x2, y2 = crop_coords
632
+ w = x2 - x
633
+ h = y2 - y
634
+ base_image = PIL.Image.new("RGBA", (width, height))
635
+ image = self.resize(image, height=h, width=w, resize_mode="crop")
636
+ base_image.paste(image, (x, y))
637
+ image = base_image.convert("RGB")
638
+
639
+ image = image.convert("RGBA")
640
+ image.alpha_composite(init_image_masked)
641
+ image = image.convert("RGB")
642
+
643
+ return image
644
+
645
+
646
+ class VaeImageProcessorLDM3D(VaeImageProcessor):
647
+ """
648
+ Image processor for VAE LDM3D.
649
+
650
+ Args:
651
+ do_resize (`bool`, *optional*, defaults to `True`):
652
+ Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`.
653
+ vae_scale_factor (`int`, *optional*, defaults to `8`):
654
+ VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor.
655
+ resample (`str`, *optional*, defaults to `lanczos`):
656
+ Resampling filter to use when resizing the image.
657
+ do_normalize (`bool`, *optional*, defaults to `True`):
658
+ Whether to normalize the image to [-1,1].
659
+ """
660
+
661
+ config_name = CONFIG_NAME
662
+
663
+ @register_to_config
664
+ def __init__(
665
+ self,
666
+ do_resize: bool = True,
667
+ vae_scale_factor: int = 8,
668
+ resample: str = "lanczos",
669
+ do_normalize: bool = True,
670
+ ):
671
+ super().__init__()
672
+
673
+ @staticmethod
674
+ def numpy_to_pil(images: np.ndarray) -> List[PIL.Image.Image]:
675
+ """
676
+ Convert a NumPy image or a batch of images to a PIL image.
677
+ """
678
+ if images.ndim == 3:
679
+ images = images[None, ...]
680
+ images = (images * 255).round().astype("uint8")
681
+ if images.shape[-1] == 1:
682
+ # special case for grayscale (single channel) images
683
+ pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
684
+ else:
685
+ pil_images = [Image.fromarray(image[:, :, :3]) for image in images]
686
+
687
+ return pil_images
688
+
689
+ @staticmethod
690
+ def depth_pil_to_numpy(images: Union[List[PIL.Image.Image], PIL.Image.Image]) -> np.ndarray:
691
+ """
692
+ Convert a PIL image or a list of PIL images to NumPy arrays.
693
+ """
694
+ if not isinstance(images, list):
695
+ images = [images]
696
+
697
+ images = [np.array(image).astype(np.float32) / (2**16 - 1) for image in images]
698
+ images = np.stack(images, axis=0)
699
+ return images
700
+
701
+ @staticmethod
702
+ def rgblike_to_depthmap(image: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
703
+ """
704
+ Args:
705
+ image: RGB-like depth image
706
+
707
+ Returns: depth map
708
+
709
+ """
710
+ return image[:, :, 1] * 2**8 + image[:, :, 2]
711
+
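The packing convention behind `rgblike_to_depthmap` is easy to miss: a 16-bit depth value is carried with its high byte in the green channel and its low byte in the blue channel, so the inverse map is `G * 2**8 + B`. A tiny standalone sketch (not part of the file):

```python
import numpy as np

depth = np.uint16(51234)               # a 16-bit depth value
high, low = depth >> 8, depth & 0xFF   # high byte 200, low byte 34

# RGB-like pixel: red unused, green = high byte, blue = low byte
pixel = np.array([[[0, high, low]]], dtype=np.uint16)

# same recovery as rgblike_to_depthmap
recovered = pixel[:, :, 1] * 2**8 + pixel[:, :, 2]
assert recovered.item() == 51234
```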
712
+ def numpy_to_depth(self, images: np.ndarray) -> List[PIL.Image.Image]:
713
+ """
714
+ Convert a NumPy depth image or a batch of images to a PIL image.
715
+ """
716
+ if images.ndim == 3:
717
+ images = images[None, ...]
718
+ images_depth = images[:, :, :, 3:]
719
+ if images.shape[-1] == 6:
720
+ images_depth = (images_depth * 255).round().astype("uint8")
721
+ pil_images = [
722
+ Image.fromarray(self.rgblike_to_depthmap(image_depth), mode="I;16") for image_depth in images_depth
723
+ ]
724
+ elif images.shape[-1] == 4:
725
+ images_depth = (images_depth * 65535.0).astype(np.uint16)
726
+ pil_images = [Image.fromarray(image_depth, mode="I;16") for image_depth in images_depth]
727
+ else:
728
+ raise Exception("Not supported")
729
+
730
+ return pil_images
731
+
732
+ def postprocess(
733
+ self,
734
+ image: torch.FloatTensor,
735
+ output_type: str = "pil",
736
+ do_denormalize: Optional[List[bool]] = None,
737
+ ) -> Union[PIL.Image.Image, np.ndarray, torch.FloatTensor]:
738
+ """
739
+ Postprocess the image output from tensor to `output_type`.
740
+
741
+ Args:
742
+ image (`torch.FloatTensor`):
743
+ The image input, should be a pytorch tensor with shape `B x C x H x W`.
744
+ output_type (`str`, *optional*, defaults to `pil`):
745
+ The output type of the image, can be one of `pil`, `np`, `pt`, `latent`.
746
+ do_denormalize (`List[bool]`, *optional*, defaults to `None`):
747
+ Whether to denormalize the image to [0,1]. If `None`, will use the value of `do_normalize` in the
748
+ `VaeImageProcessor` config.
749
+
750
+ Returns:
751
+ `PIL.Image.Image`, `np.ndarray` or `torch.FloatTensor`:
752
+ The postprocessed image.
753
+ """
754
+ if not isinstance(image, torch.Tensor):
755
+ raise ValueError(
756
+ f"Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor"
757
+ )
758
+ if output_type not in ["latent", "pt", "np", "pil"]:
759
+ deprecation_message = (
760
+ f"the output_type {output_type} is outdated and has been set to `np`. Please make sure to set it to one of these instead: "
761
+ "`pil`, `np`, `pt`, `latent`"
762
+ )
763
+ deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False)
764
+ output_type = "np"
765
+
766
+ if do_denormalize is None:
767
+ do_denormalize = [self.config.do_normalize] * image.shape[0]
768
+
769
+ image = torch.stack(
770
+ [self.denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0])]
771
+ )
772
+
773
+ image = self.pt_to_numpy(image)
774
+
775
+ if output_type == "np":
776
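+ # Six channels mean RGB plus an RGB-packed depth map; otherwise the trailing channel(s) are returned as depth directly.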
+ if image.shape[-1] == 6:
777
+ image_depth = np.stack([self.rgblike_to_depthmap(im[:, :, 3:]) for im in image], axis=0)
778
+ else:
779
+ image_depth = image[:, :, :, 3:]
780
+ return image[:, :, :, :3], image_depth
781
+
782
+ if output_type == "pil":
783
+ return self.numpy_to_pil(image), self.numpy_to_depth(image)
784
+ else:
785
+ raise Exception(f"This type {output_type} is not supported")
786
+
787
+ def preprocess(
788
+ self,
789
+ rgb: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray],
790
+ depth: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray],
791
+ height: Optional[int] = None,
792
+ width: Optional[int] = None,
793
+ target_res: Optional[int] = None,
794
+ ) -> torch.Tensor:
795
+ """
796
+ Preprocess the image input. Accepted formats are PIL images, NumPy arrays or PyTorch tensors.
797
+ """
798
+ supported_formats = (PIL.Image.Image, np.ndarray, torch.Tensor)
799
+
800
+ # Expand the missing dimension for 3-dimensional pytorch tensor or numpy array that represents grayscale image
801
+ if self.config.do_convert_grayscale and isinstance(rgb, (torch.Tensor, np.ndarray)) and rgb.ndim == 3:
802
+ raise Exception("This is not yet supported")
803
+
804
+ if isinstance(rgb, supported_formats):
805
+ rgb = [rgb]
806
+ depth = [depth]
807
+ elif not (isinstance(rgb, list) and all(isinstance(i, supported_formats) for i in rgb)):
808
+ raise ValueError(
809
+ f"Input is in incorrect format: {[type(i) for i in rgb]}. Currently, we only support {', '.join(supported_formats)}"
810
+ )
811
+
812
+ if isinstance(rgb[0], PIL.Image.Image):
813
+ if self.config.do_convert_rgb:
814
+ raise Exception("This is not yet supported")
815
+ # rgb = [self.convert_to_rgb(i) for i in rgb]
816
+ # depth = [self.convert_to_depth(i) for i in depth] #TODO define convert_to_depth
817
+ if self.config.do_resize or target_res:
818
+ height, width = self.get_default_height_width(rgb[0], height, width) if not target_res else target_res
819
+ rgb = [self.resize(i, height, width) for i in rgb]
820
+ depth = [self.resize(i, height, width) for i in depth]
821
+ rgb = self.pil_to_numpy(rgb) # to np
822
+ rgb = self.numpy_to_pt(rgb) # to pt
823
+
824
+ depth = self.depth_pil_to_numpy(depth) # to np
825
+ depth = self.numpy_to_pt(depth) # to pt
826
+
827
+ elif isinstance(rgb[0], np.ndarray):
828
+ rgb = np.concatenate(rgb, axis=0) if rgb[0].ndim == 4 else np.stack(rgb, axis=0)
829
+ rgb = self.numpy_to_pt(rgb)
830
+ height, width = self.get_default_height_width(rgb, height, width)
831
+ if self.config.do_resize:
832
+ rgb = self.resize(rgb, height, width)
833
+
834
+ depth = np.concatenate(depth, axis=0) if rgb[0].ndim == 4 else np.stack(depth, axis=0)
835
+ depth = self.numpy_to_pt(depth)
836
+ height, width = self.get_default_height_width(depth, height, width)
837
+ if self.config.do_resize:
838
+ depth = self.resize(depth, height, width)
839
+
840
+ elif isinstance(rgb[0], torch.Tensor):
841
+ raise Exception("This is not yet supported")
842
+ # rgb = torch.cat(rgb, axis=0) if rgb[0].ndim == 4 else torch.stack(rgb, axis=0)
843
+
844
+ # if self.config.do_convert_grayscale and rgb.ndim == 3:
845
+ # rgb = rgb.unsqueeze(1)
846
+
847
+ # channel = rgb.shape[1]
848
+
849
+ # height, width = self.get_default_height_width(rgb, height, width)
850
+ # if self.config.do_resize:
851
+ # rgb = self.resize(rgb, height, width)
852
+
853
+ # depth = torch.cat(depth, axis=0) if depth[0].ndim == 4 else torch.stack(depth, axis=0)
854
+
855
+ # if self.config.do_convert_grayscale and depth.ndim == 3:
856
+ # depth = depth.unsqueeze(1)
857
+
858
+ # channel = depth.shape[1]
859
+ # # don't need any preprocess if the image is latents
860
+ # if depth == 4:
861
+ # return rgb, depth
862
+
863
+ # height, width = self.get_default_height_width(depth, height, width)
864
+ # if self.config.do_resize:
865
+ # depth = self.resize(depth, height, width)
866
+ # expected range [0,1], normalize to [-1,1]
867
+ do_normalize = self.config.do_normalize
868
+ if rgb.min() < 0 and do_normalize:
869
+ warnings.warn(
870
+ "Passing `image` as torch tensor with value range in [-1,1] is deprecated. The expected value range for image tensor is [0,1] "
871
+ f"when passing as pytorch tensor or numpy Array. You passed `image` with value range [{rgb.min()},{rgb.max()}]",
872
+ FutureWarning,
873
+ )
874
+ do_normalize = False
875
+
876
+ if do_normalize:
877
+ rgb = self.normalize(rgb)
878
+ depth = self.normalize(depth)
879
+
880
+ if self.config.do_binarize:
881
+ rgb = self.binarize(rgb)
882
+ depth = self.binarize(depth)
883
+
884
+ return rgb, depth
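The preprocessing and postprocessing code above revolves around one convention: a 16-bit depth map travels alongside RGB, either as a single `uint16` channel or packed into the green and blue bytes of an RGB-like image. Here is a minimal, self-contained sketch (illustrative only, not part of the uploaded files) showing that this packing round-trips through the same arithmetic used by `rgblike_to_depthmap`:

```python
import numpy as np

# Illustrative check of the depth packing assumed above: the high byte of each
# 16-bit depth value sits in the green channel, the low byte in the blue channel.
depth = np.array([[0, 1000, 65535]], dtype=np.uint16)      # (H=1, W=3) depth map

rgb_like = np.zeros(depth.shape + (3,), dtype=np.uint16)   # (H, W, 3) RGB-like image
rgb_like[..., 1] = depth >> 8                              # green channel: high byte
rgb_like[..., 2] = depth & 0xFF                            # blue channel: low byte

# Same arithmetic as `rgblike_to_depthmap`.
recovered = rgb_like[:, :, 1] * 2**8 + rgb_like[:, :, 2]
assert np.array_equal(recovered, depth)
```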
diffusers/loaders/__init__.py ADDED
@@ -0,0 +1,86 @@
1
+ from typing import TYPE_CHECKING
2
+
3
+ from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate
4
+ from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available
5
+
6
+
7
+ def text_encoder_lora_state_dict(text_encoder):
8
+ deprecate(
9
+ "text_encoder_load_state_dict in `models`",
10
+ "0.27.0",
11
+ "`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
12
+ )
13
+ state_dict = {}
14
+
15
+ for name, module in text_encoder_attn_modules(text_encoder):
16
+ for k, v in module.q_proj.lora_linear_layer.state_dict().items():
17
+ state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
18
+
19
+ for k, v in module.k_proj.lora_linear_layer.state_dict().items():
20
+ state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
21
+
22
+ for k, v in module.v_proj.lora_linear_layer.state_dict().items():
23
+ state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
24
+
25
+ for k, v in module.out_proj.lora_linear_layer.state_dict().items():
26
+ state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
27
+
28
+ return state_dict
29
+
30
+
31
+ if is_transformers_available():
32
+
33
+ def text_encoder_attn_modules(text_encoder):
34
+ deprecate(
35
+ "text_encoder_attn_modules in `models`",
36
+ "0.27.0",
37
+ "`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
38
+ )
39
+ from transformers import CLIPTextModel, CLIPTextModelWithProjection
40
+
41
+ attn_modules = []
42
+
43
+ if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
44
+ for i, layer in enumerate(text_encoder.text_model.encoder.layers):
45
+ name = f"text_model.encoder.layers.{i}.self_attn"
46
+ mod = layer.self_attn
47
+ attn_modules.append((name, mod))
48
+ else:
49
+ raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")
50
+
51
+ return attn_modules
52
+
53
+
54
+ _import_structure = {}
55
+
56
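+ # Register loader classes for lazy import only when the required backends are available.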
+ if is_torch_available():
57
+ _import_structure["single_file"] = ["FromOriginalControlnetMixin", "FromOriginalVAEMixin"]
58
+ _import_structure["unet"] = ["UNet2DConditionLoadersMixin"]
59
+ _import_structure["utils"] = ["AttnProcsLayers"]
60
+
61
+ if is_transformers_available():
62
+ _import_structure["single_file"].extend(["FromSingleFileMixin"])
63
+ _import_structure["lora"] = ["LoraLoaderMixin", "StableDiffusionXLLoraLoaderMixin"]
64
+ _import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
65
+ _import_structure["ip_adapter"] = ["IPAdapterMixin"]
66
+
67
+ _import_structure["peft"] = ["PeftAdapterMixin"]
68
+
69
+
70
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
71
+ if is_torch_available():
72
+ from .single_file import FromOriginalControlnetMixin, FromOriginalVAEMixin
73
+ from .unet import UNet2DConditionLoadersMixin
74
+ from .utils import AttnProcsLayers
75
+
76
+ if is_transformers_available():
77
+ from .ip_adapter import IPAdapterMixin
78
+ from .lora import LoraLoaderMixin, StableDiffusionXLLoraLoaderMixin
79
+ from .single_file import FromSingleFileMixin
80
+ from .textual_inversion import TextualInversionLoaderMixin
81
+
82
+ from .peft import PeftAdapterMixin
83
+ else:
84
+ import sys
85
+
86
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
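For orientation (illustrative only, not part of the uploaded files): because the module replaces itself with a `_LazyModule`, the mixins registered in `_import_structure` are importable from `diffusers.loaders` while the heavy submodules are only resolved on first access. A quick sketch, assuming `torch` and `transformers` are installed:

```python
import diffusers.loaders as loaders

# The package object is the lazy proxy; nothing heavy has been imported yet.
print(type(loaders))

# First attribute access resolves the registered submodules.
from diffusers.loaders import IPAdapterMixin, LoraLoaderMixin

print(LoraLoaderMixin.__module__)   # diffusers.loaders.lora
print(IPAdapterMixin.__module__)    # diffusers.loaders.ip_adapter
```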
diffusers/loaders/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (2.78 kB). View file
 
diffusers/loaders/__pycache__/ip_adapter.cpython-38.pyc ADDED
Binary file (6.22 kB). View file
 
diffusers/loaders/__pycache__/lora.cpython-38.pyc ADDED
Binary file (49.3 kB). View file
 
diffusers/loaders/__pycache__/lora_conversion_utils.cpython-38.pyc ADDED
Binary file (7.23 kB). View file
 
diffusers/loaders/__pycache__/peft.cpython-38.pyc ADDED
Binary file (6.15 kB). View file
 
diffusers/loaders/__pycache__/single_file.cpython-38.pyc ADDED
Binary file (24.3 kB). View file
 
diffusers/loaders/__pycache__/textual_inversion.cpython-38.pyc ADDED
Binary file (16 kB). View file
 
diffusers/loaders/__pycache__/unet.cpython-38.pyc ADDED
Binary file (26.1 kB). View file
 
diffusers/loaders/__pycache__/utils.cpython-38.pyc ADDED
Binary file (2.08 kB). View file
 
diffusers/loaders/ip_adapter.py ADDED
@@ -0,0 +1,190 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from pathlib import Path
15
+ from typing import Dict, Union
16
+
17
+ import torch
18
+ from huggingface_hub.utils import validate_hf_hub_args
19
+ from safetensors import safe_open
20
+
21
+ from ..utils import (
22
+ _get_model_file,
23
+ is_transformers_available,
24
+ logging,
25
+ )
26
+
27
+
28
+ if is_transformers_available():
29
+ from transformers import (
30
+ CLIPImageProcessor,
31
+ CLIPVisionModelWithProjection,
32
+ )
33
+
34
+ from ..models.attention_processor import (
35
+ IPAdapterAttnProcessor,
36
+ IPAdapterAttnProcessor2_0,
37
+ )
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ class IPAdapterMixin:
43
+ """Mixin for handling IP Adapters."""
44
+
45
+ @validate_hf_hub_args
46
+ def load_ip_adapter(
47
+ self,
48
+ pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
49
+ subfolder: str,
50
+ weight_name: str,
51
+ **kwargs,
52
+ ):
53
+ """
54
+ Parameters:
55
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
56
+ Can be either:
57
+
58
+ - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
59
+ the Hub.
60
+ - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
61
+ with [`ModelMixin.save_pretrained`].
62
+ - A [torch state
63
+ dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
64
+
65
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
66
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
67
+ is not used.
68
+ force_download (`bool`, *optional*, defaults to `False`):
69
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
70
+ cached versions if they exist.
71
+ resume_download (`bool`, *optional*, defaults to `False`):
72
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
73
+ incompletely downloaded files are deleted.
74
+ proxies (`Dict[str, str]`, *optional*):
75
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
76
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
77
+ local_files_only (`bool`, *optional*, defaults to `False`):
78
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
79
+ won't be downloaded from the Hub.
80
+ token (`str` or *bool*, *optional*):
81
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
82
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
83
+ revision (`str`, *optional*, defaults to `"main"`):
84
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
85
+ allowed by Git.
86
+ subfolder (`str`, *optional*, defaults to `""`):
87
+ The subfolder location of a model file within a larger model repository on the Hub or locally.
88
+ """
89
+
90
+ # Load the main state dict first.
91
+ cache_dir = kwargs.pop("cache_dir", None)
92
+ force_download = kwargs.pop("force_download", False)
93
+ resume_download = kwargs.pop("resume_download", False)
94
+ proxies = kwargs.pop("proxies", None)
95
+ local_files_only = kwargs.pop("local_files_only", None)
96
+ token = kwargs.pop("token", None)
97
+ revision = kwargs.pop("revision", None)
98
+
99
+ user_agent = {
100
+ "file_type": "attn_procs_weights",
101
+ "framework": "pytorch",
102
+ }
103
+
104
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
105
+ model_file = _get_model_file(
106
+ pretrained_model_name_or_path_or_dict,
107
+ weights_name=weight_name,
108
+ cache_dir=cache_dir,
109
+ force_download=force_download,
110
+ resume_download=resume_download,
111
+ proxies=proxies,
112
+ local_files_only=local_files_only,
113
+ token=token,
114
+ revision=revision,
115
+ subfolder=subfolder,
116
+ user_agent=user_agent,
117
+ )
118
+ if weight_name.endswith(".safetensors"):
119
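+ # Split the flat safetensors checkpoint into the `image_proj` and `ip_adapter` sub-dicts by key prefix.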
+ state_dict = {"image_proj": {}, "ip_adapter": {}}
120
+ with safe_open(model_file, framework="pt", device="cpu") as f:
121
+ for key in f.keys():
122
+ if key.startswith("image_proj."):
123
+ state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
124
+ elif key.startswith("ip_adapter."):
125
+ state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
126
+ else:
127
+ state_dict = torch.load(model_file, map_location="cpu")
128
+ else:
129
+ state_dict = pretrained_model_name_or_path_or_dict
130
+
131
+ keys = list(state_dict.keys())
132
+ if keys != ["image_proj", "ip_adapter"]:
133
+ raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.")
134
+
135
+ # load CLIP image encoder here if it has not been registered to the pipeline yet
136
+ if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
137
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
138
+ logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}")
139
+ image_encoder = CLIPVisionModelWithProjection.from_pretrained(
140
+ pretrained_model_name_or_path_or_dict,
141
+ subfolder=Path(subfolder, "image_encoder").as_posix(),
142
+ ).to(self.device, dtype=self.dtype)
143
+ self.image_encoder = image_encoder
144
+ self.register_to_config(image_encoder=["transformers", "CLIPVisionModelWithProjection"])
145
+ else:
146
+ raise ValueError("`image_encoder` cannot be None when using IP Adapters.")
147
+
148
+ # create feature extractor if it has not been registered to the pipeline yet
149
+ if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None:
150
+ self.feature_extractor = CLIPImageProcessor()
151
+ self.register_to_config(feature_extractor=["transformers", "CLIPImageProcessor"])
152
+
153
+ # load ip-adapter into unet
154
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
155
+ unet._load_ip_adapter_weights(state_dict)
156
+
157
+ def set_ip_adapter_scale(self, scale):
158
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
159
+ for attn_processor in unet.attn_processors.values():
160
+ if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):
161
+ attn_processor.scale = scale
162
+
163
+ def unload_ip_adapter(self):
164
+ """
165
+ Unloads the IP Adapter weights
166
+
167
+ Examples:
168
+
169
+ ```python
170
+ >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.
171
+ >>> pipeline.unload_ip_adapter()
172
+ >>> ...
173
+ ```
174
+ """
175
+ # remove CLIP image encoder
176
+ if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None:
177
+ self.image_encoder = None
178
+ self.register_to_config(image_encoder=[None, None])
179
+
180
+ # remove feature extractor
181
+ if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is not None:
182
+ self.feature_extractor = None
183
+ self.register_to_config(feature_extractor=[None, None])
184
+
185
+ # remove the encoder hidden-states projection (`encoder_hid_proj`) added for the IP adapter
186
+ self.unet.encoder_hid_proj = None
187
+ self.config.encoder_hid_dim_type = None
188
+
189
+ # restore the original UNet attention processor layers
190
+ self.unet.set_default_attn_processor()
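A hedged usage sketch for the mixin above (illustrative only; the pipeline class, repository id, subfolder, weight file, and image path are example values and may differ for a given setup):

```python
import torch
from PIL import Image
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Downloads the adapter weights and, if not already present on the pipeline,
# the CLIP image encoder from the same repository.
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
pipe.set_ip_adapter_scale(0.6)  # how strongly the image prompt steers attention

reference = Image.open("style_reference.png")  # any RGB image used as the image prompt
result = pipe(prompt="a cat, best quality", ip_adapter_image=reference).images[0]
result.save("ip_adapter_result.png")
```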
diffusers/loaders/lora.py ADDED
@@ -0,0 +1,1554 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import inspect
15
+ import os
16
+ from contextlib import nullcontext
17
+ from pathlib import Path
18
+ from typing import Callable, Dict, List, Optional, Union
19
+
20
+ import safetensors
21
+ import torch
22
+ from huggingface_hub import model_info
23
+ from huggingface_hub.constants import HF_HUB_OFFLINE
24
+ from huggingface_hub.utils import validate_hf_hub_args
25
+ from packaging import version
26
+ from torch import nn
27
+
28
+ from .. import __version__
29
+ from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_model_dict_into_meta
30
+ from ..utils import (
31
+ USE_PEFT_BACKEND,
32
+ _get_model_file,
33
+ convert_state_dict_to_diffusers,
34
+ convert_state_dict_to_peft,
35
+ convert_unet_state_dict_to_peft,
36
+ delete_adapter_layers,
37
+ deprecate,
38
+ get_adapter_name,
39
+ get_peft_kwargs,
40
+ is_accelerate_available,
41
+ is_transformers_available,
42
+ logging,
43
+ recurse_remove_peft_layers,
44
+ scale_lora_layers,
45
+ set_adapter_layers,
46
+ set_weights_and_activate_adapters,
47
+ )
48
+ from .lora_conversion_utils import _convert_kohya_lora_to_diffusers, _maybe_map_sgm_blocks_to_diffusers
49
+
50
+
51
+ if is_transformers_available():
52
+ from transformers import PreTrainedModel
53
+
54
+ from ..models.lora import PatchedLoraProjection, text_encoder_attn_modules, text_encoder_mlp_modules
55
+
56
+ if is_accelerate_available():
57
+ from accelerate import init_empty_weights
58
+ from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
59
+
60
+ logger = logging.get_logger(__name__)
61
+
62
+ TEXT_ENCODER_NAME = "text_encoder"
63
+ UNET_NAME = "unet"
64
+ TRANSFORMER_NAME = "transformer"
65
+
66
+ LORA_WEIGHT_NAME = "pytorch_lora_weights.bin"
67
+ LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors"
68
+
69
+ LORA_DEPRECATION_MESSAGE = "You are using an old version of the LoRA backend. This will be deprecated in upcoming releases in favor of PEFT. Make sure to install the latest PEFT and transformers packages."
70
+
71
+
72
+ class LoraLoaderMixin:
73
+ r"""
74
+ Load LoRA layers into [`UNet2DConditionModel`] and
75
+ [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel).
76
+ """
77
+
78
+ text_encoder_name = TEXT_ENCODER_NAME
79
+ unet_name = UNET_NAME
80
+ transformer_name = TRANSFORMER_NAME
81
+ num_fused_loras = 0
82
+
83
+ def load_lora_weights(
84
+ self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs
85
+ ):
86
+ """
87
+ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
88
+ `self.text_encoder`.
89
+
90
+ All kwargs are forwarded to `self.lora_state_dict`.
91
+
92
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
93
+
94
+ See [`~loaders.LoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into
95
+ `self.unet`.
96
+
97
+ See [`~loaders.LoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded
98
+ into `self.text_encoder`.
99
+
100
+ Parameters:
101
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
102
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`].
103
+ kwargs (`dict`, *optional*):
104
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`].
105
+ adapter_name (`str`, *optional*):
106
+ Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
107
+ `default_{i}` where i is the total number of adapters being loaded.
108
+ """
109
+ # First, ensure that the checkpoint is a compatible one and can be successfully loaded.
110
+ state_dict, network_alphas = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs)
111
+
112
+ is_correct_format = all("lora" in key for key in state_dict.keys())
113
+ if not is_correct_format:
114
+ raise ValueError("Invalid LoRA checkpoint.")
115
+
116
+ low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
117
+
118
+ self.load_lora_into_unet(
119
+ state_dict,
120
+ network_alphas=network_alphas,
121
+ unet=getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet,
122
+ low_cpu_mem_usage=low_cpu_mem_usage,
123
+ adapter_name=adapter_name,
124
+ _pipeline=self,
125
+ )
126
+ self.load_lora_into_text_encoder(
127
+ state_dict,
128
+ network_alphas=network_alphas,
129
+ text_encoder=getattr(self, self.text_encoder_name)
130
+ if not hasattr(self, "text_encoder")
131
+ else self.text_encoder,
132
+ lora_scale=self.lora_scale,
133
+ low_cpu_mem_usage=low_cpu_mem_usage,
134
+ adapter_name=adapter_name,
135
+ _pipeline=self,
136
+ )
137
+
138
+ @classmethod
139
+ @validate_hf_hub_args
140
+ def lora_state_dict(
141
+ cls,
142
+ pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
143
+ **kwargs,
144
+ ):
145
+ r"""
146
+ Return state dict for lora weights and the network alphas.
147
+
148
+ <Tip warning={true}>
149
+
150
+ We support loading A1111 formatted LoRA checkpoints in a limited capacity.
151
+
152
+ This function is experimental and might change in the future.
153
+
154
+ </Tip>
155
+
156
+ Parameters:
157
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
158
+ Can be either:
159
+
160
+ - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
161
+ the Hub.
162
+ - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
163
+ with [`ModelMixin.save_pretrained`].
164
+ - A [torch state
165
+ dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
166
+
167
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
168
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
169
+ is not used.
170
+ force_download (`bool`, *optional*, defaults to `False`):
171
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
172
+ cached versions if they exist.
173
+ resume_download (`bool`, *optional*, defaults to `False`):
174
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
175
+ incompletely downloaded files are deleted.
176
+ proxies (`Dict[str, str]`, *optional*):
177
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
178
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
179
+ local_files_only (`bool`, *optional*, defaults to `False`):
180
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
181
+ won't be downloaded from the Hub.
182
+ token (`str` or *bool*, *optional*):
183
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
184
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
185
+ revision (`str`, *optional*, defaults to `"main"`):
186
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
187
+ allowed by Git.
188
+ subfolder (`str`, *optional*, defaults to `""`):
189
+ The subfolder location of a model file within a larger model repository on the Hub or locally.
190
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
191
+ Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
192
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
193
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
194
+ argument to `True` will raise an error.
195
+ mirror (`str`, *optional*):
196
+ Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
197
+ guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
198
+ information.
199
+
200
+ """
201
+ # Load the main state dict first which has the LoRA layers for either of
202
+ # UNet and text encoder or both.
203
+ cache_dir = kwargs.pop("cache_dir", None)
204
+ force_download = kwargs.pop("force_download", False)
205
+ resume_download = kwargs.pop("resume_download", False)
206
+ proxies = kwargs.pop("proxies", None)
207
+ local_files_only = kwargs.pop("local_files_only", None)
208
+ token = kwargs.pop("token", None)
209
+ revision = kwargs.pop("revision", None)
210
+ subfolder = kwargs.pop("subfolder", None)
211
+ weight_name = kwargs.pop("weight_name", None)
212
+ unet_config = kwargs.pop("unet_config", None)
213
+ use_safetensors = kwargs.pop("use_safetensors", None)
214
+
215
+ allow_pickle = False
216
+ if use_safetensors is None:
217
+ use_safetensors = True
218
+ allow_pickle = True
219
+
220
+ user_agent = {
221
+ "file_type": "attn_procs_weights",
222
+ "framework": "pytorch",
223
+ }
224
+
225
+ model_file = None
226
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
227
+ # Let's first try to load .safetensors weights
228
+ if (use_safetensors and weight_name is None) or (
229
+ weight_name is not None and weight_name.endswith(".safetensors")
230
+ ):
231
+ try:
232
+ # Here we're relaxing the loading check to enable more Inference API
233
+ # friendliness where sometimes, it's not at all possible to automatically
234
+ # determine `weight_name`.
235
+ if weight_name is None:
236
+ weight_name = cls._best_guess_weight_name(
237
+ pretrained_model_name_or_path_or_dict,
238
+ file_extension=".safetensors",
239
+ local_files_only=local_files_only,
240
+ )
241
+ model_file = _get_model_file(
242
+ pretrained_model_name_or_path_or_dict,
243
+ weights_name=weight_name or LORA_WEIGHT_NAME_SAFE,
244
+ cache_dir=cache_dir,
245
+ force_download=force_download,
246
+ resume_download=resume_download,
247
+ proxies=proxies,
248
+ local_files_only=local_files_only,
249
+ token=token,
250
+ revision=revision,
251
+ subfolder=subfolder,
252
+ user_agent=user_agent,
253
+ )
254
+ state_dict = safetensors.torch.load_file(model_file, device="cpu")
255
+ except (IOError, safetensors.SafetensorError) as e:
256
+ if not allow_pickle:
257
+ raise e
258
+ # try loading non-safetensors weights
259
+ model_file = None
260
+ pass
261
+
262
+ if model_file is None:
263
+ if weight_name is None:
264
+ weight_name = cls._best_guess_weight_name(
265
+ pretrained_model_name_or_path_or_dict, file_extension=".bin", local_files_only=local_files_only
266
+ )
267
+ model_file = _get_model_file(
268
+ pretrained_model_name_or_path_or_dict,
269
+ weights_name=weight_name or LORA_WEIGHT_NAME,
270
+ cache_dir=cache_dir,
271
+ force_download=force_download,
272
+ resume_download=resume_download,
273
+ proxies=proxies,
274
+ local_files_only=local_files_only,
275
+ token=token,
276
+ revision=revision,
277
+ subfolder=subfolder,
278
+ user_agent=user_agent,
279
+ )
280
+ state_dict = torch.load(model_file, map_location="cpu")
281
+ else:
282
+ state_dict = pretrained_model_name_or_path_or_dict
283
+
284
+ network_alphas = None
285
+ # TODO: replace it with a method from `state_dict_utils`
286
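+ # Keys such as `lora_unet_*` or `lora_te_*` indicate a Kohya/A1111-style checkpoint that must be converted to the diffusers format.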
+ if all(
287
+ (
288
+ k.startswith("lora_te_")
289
+ or k.startswith("lora_unet_")
290
+ or k.startswith("lora_te1_")
291
+ or k.startswith("lora_te2_")
292
+ )
293
+ for k in state_dict.keys()
294
+ ):
295
+ # Map SDXL blocks correctly.
296
+ if unet_config is not None:
297
+ # use unet config to remap block numbers
298
+ state_dict = _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config)
299
+ state_dict, network_alphas = _convert_kohya_lora_to_diffusers(state_dict)
300
+
301
+ return state_dict, network_alphas
302
+
303
+ @classmethod
304
+ def _best_guess_weight_name(
305
+ cls, pretrained_model_name_or_path_or_dict, file_extension=".safetensors", local_files_only=False
306
+ ):
307
+ if local_files_only or HF_HUB_OFFLINE:
308
+ raise ValueError("When using the offline mode, you must specify a `weight_name`.")
309
+
310
+ targeted_files = []
311
+
312
+ if os.path.isfile(pretrained_model_name_or_path_or_dict):
313
+ return
314
+ elif os.path.isdir(pretrained_model_name_or_path_or_dict):
315
+ targeted_files = [
316
+ f for f in os.listdir(pretrained_model_name_or_path_or_dict) if f.endswith(file_extension)
317
+ ]
318
+ else:
319
+ files_in_repo = model_info(pretrained_model_name_or_path_or_dict).siblings
320
+ targeted_files = [f.rfilename for f in files_in_repo if f.rfilename.endswith(file_extension)]
321
+ if len(targeted_files) == 0:
322
+ return
323
+
324
+ # "scheduler" does not correspond to a LoRA checkpoint.
325
+ # "optimizer" does not correspond to a LoRA checkpoint
326
+ # only top-level checkpoints are considered and not the other ones, hence "checkpoint".
327
+ unallowed_substrings = {"scheduler", "optimizer", "checkpoint"}
328
+ targeted_files = list(
329
+ filter(lambda x: all(substring not in x for substring in unallowed_substrings), targeted_files)
330
+ )
331
+
332
+ if any(f.endswith(LORA_WEIGHT_NAME) for f in targeted_files):
333
+ targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME), targeted_files))
334
+ elif any(f.endswith(LORA_WEIGHT_NAME_SAFE) for f in targeted_files):
335
+ targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME_SAFE), targeted_files))
336
+
337
+ if len(targeted_files) > 1:
338
+ raise ValueError(
339
+ f"Provided path contains more than one weights file in the {file_extension} format. Either specify `weight_name` in `load_lora_weights` or make sure there's only one `.safetensors` or `.bin` file in {pretrained_model_name_or_path_or_dict}."
340
+ )
341
+ weight_name = targeted_files[0]
342
+ return weight_name
343
+
344
+ @classmethod
345
+ def _optionally_disable_offloading(cls, _pipeline):
346
+ """
347
+ Optionally removes offloading in case the pipeline has been already sequentially offloaded to CPU.
348
+
349
+ Args:
350
+ _pipeline (`DiffusionPipeline`):
351
+ The pipeline to disable offloading for.
352
+
353
+ Returns:
354
+ tuple:
355
+ A tuple indicating if `is_model_cpu_offload` or `is_sequential_cpu_offload` is True.
356
+ """
357
+ is_model_cpu_offload = False
358
+ is_sequential_cpu_offload = False
359
+
360
+ if _pipeline is not None:
361
+ for _, component in _pipeline.components.items():
362
+ if isinstance(component, nn.Module) and hasattr(component, "_hf_hook"):
363
+ if not is_model_cpu_offload:
364
+ is_model_cpu_offload = isinstance(component._hf_hook, CpuOffload)
365
+ if not is_sequential_cpu_offload:
366
+ is_sequential_cpu_offload = isinstance(component._hf_hook, AlignDevicesHook)
367
+
368
+ logger.info(
369
+ "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
370
+ )
371
+ remove_hook_from_module(component, recurse=is_sequential_cpu_offload)
372
+
373
+ return (is_model_cpu_offload, is_sequential_cpu_offload)
374
+
375
+ @classmethod
376
+ def load_lora_into_unet(
377
+ cls, state_dict, network_alphas, unet, low_cpu_mem_usage=None, adapter_name=None, _pipeline=None
378
+ ):
379
+ """
380
+ This will load the LoRA layers specified in `state_dict` into `unet`.
381
+
382
+ Parameters:
383
+ state_dict (`dict`):
384
+ A standard state dict containing the lora layer parameters. The keys can either be indexed directly
385
+ into the unet or prefixed with an additional `unet` which can be used to distinguish between text
386
+ encoder lora layers.
387
+ network_alphas (`Dict[str, float]`):
388
+ See `LoRALinearLayer` for more details.
389
+ unet (`UNet2DConditionModel`):
390
+ The UNet model to load the LoRA layers into.
391
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
392
+ Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
393
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
394
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
395
+ argument to `True` will raise an error.
396
+ adapter_name (`str`, *optional*):
397
+ Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
398
+ `default_{i}` where i is the total number of adapters being loaded.
399
+ """
400
+ low_cpu_mem_usage = low_cpu_mem_usage if low_cpu_mem_usage is not None else _LOW_CPU_MEM_USAGE_DEFAULT
401
+ # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918),
402
+ # then the `state_dict` keys should have `cls.unet_name` and/or `cls.text_encoder_name` as
403
+ # their prefixes.
404
+ keys = list(state_dict.keys())
405
+
406
+ if all(key.startswith("unet.unet") for key in keys):
407
+ deprecation_message = "Keys starting with 'unet.unet' are deprecated."
408
+ deprecate("unet.unet keys", "0.27", deprecation_message)
409
+
410
+ if all(key.startswith(cls.unet_name) or key.startswith(cls.text_encoder_name) for key in keys):
411
+ # Load the layers corresponding to UNet.
412
+ logger.info(f"Loading {cls.unet_name}.")
413
+
414
+ unet_keys = [k for k in keys if k.startswith(cls.unet_name)]
415
+ state_dict = {k.replace(f"{cls.unet_name}.", ""): v for k, v in state_dict.items() if k in unet_keys}
416
+
417
+ if network_alphas is not None:
418
+ alpha_keys = [k for k in network_alphas.keys() if k.startswith(cls.unet_name)]
419
+ network_alphas = {
420
+ k.replace(f"{cls.unet_name}.", ""): v for k, v in network_alphas.items() if k in alpha_keys
421
+ }
422
+
423
+ else:
424
+ # Otherwise, we're dealing with the old format. This means the `state_dict` should only
425
+ # contain the module names of the `unet` as its keys WITHOUT any prefix.
426
+ if not USE_PEFT_BACKEND:
427
+ warn_message = "You have saved the LoRA weights using the old format. To convert the old LoRA weights to the new format, you can first load them in a dictionary and then create a new dictionary like the following: `new_state_dict = {f'unet.{module_name}': params for module_name, params in old_state_dict.items()}`."
428
+ logger.warn(warn_message)
429
+
430
+ if USE_PEFT_BACKEND and len(state_dict.keys()) > 0:
431
+ from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict
432
+
433
+ if adapter_name in getattr(unet, "peft_config", {}):
434
+ raise ValueError(
435
+ f"Adapter name {adapter_name} already in use in the Unet - please select a new adapter name."
436
+ )
437
+
438
+ state_dict = convert_unet_state_dict_to_peft(state_dict)
439
+
440
+ if network_alphas is not None:
441
+ # The alphas state dict has the same structure as the UNet's, so we convert it to PEFT format using the
442
+ # `convert_unet_state_dict_to_peft` method.
443
+ network_alphas = convert_unet_state_dict_to_peft(network_alphas)
444
+
445
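+ # Infer each LoRA rank from the second dimension of the corresponding `lora_B` (up-projection) weight.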
+ rank = {}
446
+ for key, val in state_dict.items():
447
+ if "lora_B" in key:
448
+ rank[key] = val.shape[1]
449
+
450
+ lora_config_kwargs = get_peft_kwargs(rank, network_alphas, state_dict, is_unet=True)
451
+ lora_config = LoraConfig(**lora_config_kwargs)
452
+
453
+ # adapter_name
454
+ if adapter_name is None:
455
+ adapter_name = get_adapter_name(unet)
456
+
457
+ # In case the pipeline has been already offloaded to CPU - temporarily remove the hooks
458
+ # otherwise loading LoRA weights will lead to an error
459
+ is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline)
460
+
461
+ inject_adapter_in_model(lora_config, unet, adapter_name=adapter_name)
462
+ incompatible_keys = set_peft_model_state_dict(unet, state_dict, adapter_name)
463
+
464
+ if incompatible_keys is not None:
465
+ # check only for unexpected keys
466
+ unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
467
+ if unexpected_keys:
468
+ logger.warning(
469
+ f"Loading adapter weights from state_dict led to unexpected keys not found in the model: "
470
+ f" {unexpected_keys}. "
471
+ )
472
+
473
+ # Offload back.
474
+ if is_model_cpu_offload:
475
+ _pipeline.enable_model_cpu_offload()
476
+ elif is_sequential_cpu_offload:
477
+ _pipeline.enable_sequential_cpu_offload()
478
+ # Unsafe code />
479
+
480
+ unet.load_attn_procs(
481
+ state_dict, network_alphas=network_alphas, low_cpu_mem_usage=low_cpu_mem_usage, _pipeline=_pipeline
482
+ )
483
+
484
+ @classmethod
485
+ def load_lora_into_text_encoder(
486
+ cls,
487
+ state_dict,
488
+ network_alphas,
489
+ text_encoder,
490
+ prefix=None,
491
+ lora_scale=1.0,
492
+ low_cpu_mem_usage=None,
493
+ adapter_name=None,
494
+ _pipeline=None,
495
+ ):
496
+ """
497
+ This will load the LoRA layers specified in `state_dict` into `text_encoder`
498
+
499
+ Parameters:
500
+ state_dict (`dict`):
501
+ A standard state dict containing the lora layer parameters. The key should be prefixed with an
502
+ additional `text_encoder` to distinguish them from the unet lora layers.
503
+ network_alphas (`Dict[str, float]`):
504
+ See `LoRALinearLayer` for more details.
505
+ text_encoder (`CLIPTextModel`):
506
+ The text encoder model to load the LoRA layers into.
507
+ prefix (`str`):
508
+ Expected prefix of the `text_encoder` in the `state_dict`.
509
+ lora_scale (`float`):
510
+ How much to scale the output of the lora linear layer before it is added with the output of the regular
511
+ lora layer.
512
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
513
+ Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
514
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
515
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
516
+ argument to `True` will raise an error.
517
+ adapter_name (`str`, *optional*):
518
+ Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
519
+ `default_{i}` where i is the total number of adapters being loaded.
520
+ """
521
+ low_cpu_mem_usage = low_cpu_mem_usage if low_cpu_mem_usage is not None else _LOW_CPU_MEM_USAGE_DEFAULT
522
+
523
+ # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918),
524
+ # then the `state_dict` keys should have `self.unet_name` and/or `self.text_encoder_name` as
525
+ # their prefixes.
526
+ keys = list(state_dict.keys())
527
+ prefix = cls.text_encoder_name if prefix is None else prefix
528
+
529
+ # Safe prefix to check with.
530
+ if any(cls.text_encoder_name in key for key in keys):
531
+ # Load the layers corresponding to text encoder and make necessary adjustments.
532
+ text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split(".")[0] == prefix]
533
+ text_encoder_lora_state_dict = {
534
+ k.replace(f"{prefix}.", ""): v for k, v in state_dict.items() if k in text_encoder_keys
535
+ }
536
+
537
+ if len(text_encoder_lora_state_dict) > 0:
538
+ logger.info(f"Loading {prefix}.")
539
+ rank = {}
540
+ text_encoder_lora_state_dict = convert_state_dict_to_diffusers(text_encoder_lora_state_dict)
541
+
542
+ if USE_PEFT_BACKEND:
543
+ # convert state dict
544
+ text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict)
545
+
546
+ for name, _ in text_encoder_attn_modules(text_encoder):
547
+ rank_key = f"{name}.out_proj.lora_B.weight"
548
+ rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1]
549
+
550
+ patch_mlp = any(".mlp." in key for key in text_encoder_lora_state_dict.keys())
551
+ if patch_mlp:
552
+ for name, _ in text_encoder_mlp_modules(text_encoder):
553
+ rank_key_fc1 = f"{name}.fc1.lora_B.weight"
554
+ rank_key_fc2 = f"{name}.fc2.lora_B.weight"
555
+
556
+ rank[rank_key_fc1] = text_encoder_lora_state_dict[rank_key_fc1].shape[1]
557
+ rank[rank_key_fc2] = text_encoder_lora_state_dict[rank_key_fc2].shape[1]
558
+ else:
559
+ for name, _ in text_encoder_attn_modules(text_encoder):
560
+ rank_key = f"{name}.out_proj.lora_linear_layer.up.weight"
561
+ rank.update({rank_key: text_encoder_lora_state_dict[rank_key].shape[1]})
562
+
563
+ patch_mlp = any(".mlp." in key for key in text_encoder_lora_state_dict.keys())
564
+ if patch_mlp:
565
+ for name, _ in text_encoder_mlp_modules(text_encoder):
566
+ rank_key_fc1 = f"{name}.fc1.lora_linear_layer.up.weight"
567
+ rank_key_fc2 = f"{name}.fc2.lora_linear_layer.up.weight"
568
+ rank[rank_key_fc1] = text_encoder_lora_state_dict[rank_key_fc1].shape[1]
569
+ rank[rank_key_fc2] = text_encoder_lora_state_dict[rank_key_fc2].shape[1]
570
+
571
+ if network_alphas is not None:
572
+ alpha_keys = [
573
+ k for k in network_alphas.keys() if k.startswith(prefix) and k.split(".")[0] == prefix
574
+ ]
575
+ network_alphas = {
576
+ k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys
577
+ }
578
+
579
+ if USE_PEFT_BACKEND:
580
+ from peft import LoraConfig
581
+
582
+ lora_config_kwargs = get_peft_kwargs(
583
+ rank, network_alphas, text_encoder_lora_state_dict, is_unet=False
584
+ )
585
+ lora_config = LoraConfig(**lora_config_kwargs)
586
+
587
+ # adapter_name
588
+ if adapter_name is None:
589
+ adapter_name = get_adapter_name(text_encoder)
590
+
591
+ is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline)
592
+
593
+ # inject LoRA layers and load the state dict
594
+ # in transformers we automatically check whether the adapter name is already in use or not
595
+ text_encoder.load_adapter(
596
+ adapter_name=adapter_name,
597
+ adapter_state_dict=text_encoder_lora_state_dict,
598
+ peft_config=lora_config,
599
+ )
600
+
601
+ # scale LoRA layers with `lora_scale`
602
+ scale_lora_layers(text_encoder, weight=lora_scale)
603
+ else:
604
+ cls._modify_text_encoder(
605
+ text_encoder,
606
+ lora_scale,
607
+ network_alphas,
608
+ rank=rank,
609
+ patch_mlp=patch_mlp,
610
+ low_cpu_mem_usage=low_cpu_mem_usage,
611
+ )
612
+
613
+ is_pipeline_offloaded = _pipeline is not None and any(
614
+ isinstance(c, torch.nn.Module) and hasattr(c, "_hf_hook")
615
+ for c in _pipeline.components.values()
616
+ )
617
+ if is_pipeline_offloaded and low_cpu_mem_usage:
618
+ low_cpu_mem_usage = True
619
+ logger.info(
620
+ f"Pipeline {_pipeline.__class__} is offloaded. Therefore low cpu mem usage loading is forced."
621
+ )
622
+
623
+ if low_cpu_mem_usage:
624
+ device = next(iter(text_encoder_lora_state_dict.values())).device
625
+ dtype = next(iter(text_encoder_lora_state_dict.values())).dtype
626
+ unexpected_keys = load_model_dict_into_meta(
627
+ text_encoder, text_encoder_lora_state_dict, device=device, dtype=dtype
628
+ )
629
+ else:
630
+ load_state_dict_results = text_encoder.load_state_dict(
631
+ text_encoder_lora_state_dict, strict=False
632
+ )
633
+ unexpected_keys = load_state_dict_results.unexpected_keys
634
+
635
+ if len(unexpected_keys) != 0:
636
+ raise ValueError(
637
+ f"failed to load text encoder state dict, unexpected keys: {load_state_dict_results.unexpected_keys}"
638
+ )
639
+
640
+ # <Unsafe code
641
+ # We can be sure that the following works as all we do is change the dtype and device of the text encoder
642
+ # Now we remove any existing hooks to
643
+ is_model_cpu_offload = False
644
+ is_sequential_cpu_offload = False
645
+ if _pipeline is not None:
646
+ for _, component in _pipeline.components.items():
647
+ if isinstance(component, torch.nn.Module):
648
+ if hasattr(component, "_hf_hook"):
649
+ is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
650
+ is_sequential_cpu_offload = isinstance(
651
+ getattr(component, "_hf_hook"), AlignDevicesHook
652
+ )
653
+ logger.info(
654
+ "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
655
+ )
656
+ remove_hook_from_module(component, recurse=is_sequential_cpu_offload)
657
+
658
+ text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype)
659
+
660
+ # Offload back.
661
+ if is_model_cpu_offload:
662
+ _pipeline.enable_model_cpu_offload()
663
+ elif is_sequential_cpu_offload:
664
+ _pipeline.enable_sequential_cpu_offload()
665
+ # Unsafe code />
666
+
667
+ @classmethod
668
+ def load_lora_into_transformer(
669
+ cls, state_dict, network_alphas, transformer, low_cpu_mem_usage=None, adapter_name=None, _pipeline=None
670
+ ):
671
+ """
672
+ This will load the LoRA layers specified in `state_dict` into `transformer`.
673
+
674
+ Parameters:
675
+ state_dict (`dict`):
676
+ A standard state dict containing the lora layer parameters. The keys can either be indexed directly
677
+ into the transformer or prefixed with an additional `transformer`, which can be used to distinguish them from text
678
+ encoder lora layers.
679
+ network_alphas (`Dict[str, float]`):
680
+ See `LoRALinearLayer` for more details.
681
+ transformer (`torch.nn.Module`):
682
+ The transformer model to load the LoRA layers into.
683
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
684
+ Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
685
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
686
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
687
+ argument to `True` will raise an error.
688
+ adapter_name (`str`, *optional*):
689
+ Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
690
+ `default_{i}` where i is the total number of adapters being loaded.
691
+ """
692
+ low_cpu_mem_usage = low_cpu_mem_usage if low_cpu_mem_usage is not None else _LOW_CPU_MEM_USAGE_DEFAULT
693
+
694
+ keys = list(state_dict.keys())
695
+
696
+ transformer_keys = [k for k in keys if k.startswith(cls.transformer_name)]
697
+ state_dict = {
698
+ k.replace(f"{cls.transformer_name}.", ""): v for k, v in state_dict.items() if k in transformer_keys
699
+ }
700
+
701
+ if network_alphas is not None:
702
+ alpha_keys = [k for k in network_alphas.keys() if k.startswith(cls.transformer_name)]
703
+ network_alphas = {
704
+ k.replace(f"{cls.transformer_name}.", ""): v for k, v in network_alphas.items() if k in alpha_keys
705
+ }
706
+
707
+ if len(state_dict.keys()) > 0:
708
+ from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict
709
+
710
+ if adapter_name in getattr(transformer, "peft_config", {}):
711
+ raise ValueError(
712
+ f"Adapter name {adapter_name} already in use in the transformer - please select a new adapter name."
713
+ )
714
+
715
+ rank = {}
716
+ for key, val in state_dict.items():
717
+ if "lora_B" in key:
718
+ rank[key] = val.shape[1]
719
+
720
+ lora_config_kwargs = get_peft_kwargs(rank, network_alphas, state_dict)
721
+ lora_config = LoraConfig(**lora_config_kwargs)
722
+
723
+ # adapter_name
724
+ if adapter_name is None:
725
+ adapter_name = get_adapter_name(transformer)
726
+
727
+ # In case the pipeline has been already offloaded to CPU - temporarily remove the hooks
728
+ # otherwise loading LoRA weights will lead to an error
729
+ is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline)
730
+
731
+ inject_adapter_in_model(lora_config, transformer, adapter_name=adapter_name)
732
+ incompatible_keys = set_peft_model_state_dict(transformer, state_dict, adapter_name)
733
+
734
+ if incompatible_keys is not None:
735
+ # check only for unexpected keys
736
+ unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
737
+ if unexpected_keys:
738
+ logger.warning(
739
+ f"Loading adapter weights from state_dict led to unexpected keys not found in the model: "
740
+ f" {unexpected_keys}. "
741
+ )
742
+
743
+ # Offload back.
744
+ if is_model_cpu_offload:
745
+ _pipeline.enable_model_cpu_offload()
746
+ elif is_sequential_cpu_offload:
747
+ _pipeline.enable_sequential_cpu_offload()
748
+ # Unsafe code />
749
+
750
+ @property
751
+ def lora_scale(self) -> float:
752
+ # property function that returns the lora scale which can be set at run time by the pipeline.
753
+ # if _lora_scale has not been set, return 1
754
+ return self._lora_scale if hasattr(self, "_lora_scale") else 1.0
755
+
756
+ def _remove_text_encoder_monkey_patch(self):
757
+ if USE_PEFT_BACKEND:
758
+ remove_method = recurse_remove_peft_layers
759
+ else:
760
+ remove_method = self._remove_text_encoder_monkey_patch_classmethod
761
+
762
+ if hasattr(self, "text_encoder"):
763
+ remove_method(self.text_encoder)
764
+
765
+ # In case the text encoder has no LoRA attached
766
+ if USE_PEFT_BACKEND and getattr(self.text_encoder, "peft_config", None) is not None:
767
+ del self.text_encoder.peft_config
768
+ self.text_encoder._hf_peft_config_loaded = None
769
+ if hasattr(self, "text_encoder_2"):
770
+ remove_method(self.text_encoder_2)
771
+ if USE_PEFT_BACKEND:
772
+ del self.text_encoder_2.peft_config
773
+ self.text_encoder_2._hf_peft_config_loaded = None
774
+
775
+ @classmethod
776
+ def _remove_text_encoder_monkey_patch_classmethod(cls, text_encoder):
777
+ deprecate("_remove_text_encoder_monkey_patch_classmethod", "0.27", LORA_DEPRECATION_MESSAGE)
778
+
779
+ for _, attn_module in text_encoder_attn_modules(text_encoder):
780
+ if isinstance(attn_module.q_proj, PatchedLoraProjection):
781
+ attn_module.q_proj.lora_linear_layer = None
782
+ attn_module.k_proj.lora_linear_layer = None
783
+ attn_module.v_proj.lora_linear_layer = None
784
+ attn_module.out_proj.lora_linear_layer = None
785
+
786
+ for _, mlp_module in text_encoder_mlp_modules(text_encoder):
787
+ if isinstance(mlp_module.fc1, PatchedLoraProjection):
788
+ mlp_module.fc1.lora_linear_layer = None
789
+ mlp_module.fc2.lora_linear_layer = None
790
+
791
+ @classmethod
792
+ def _modify_text_encoder(
793
+ cls,
794
+ text_encoder,
795
+ lora_scale=1,
796
+ network_alphas=None,
797
+ rank: Union[Dict[str, int], int] = 4,
798
+ dtype=None,
799
+ patch_mlp=False,
800
+ low_cpu_mem_usage=False,
801
+ ):
802
+ r"""
803
+ Monkey-patches the forward passes of attention modules of the text encoder.
804
+ """
805
+ deprecate("_modify_text_encoder", "0.27", LORA_DEPRECATION_MESSAGE)
806
+
807
+ def create_patched_linear_lora(model, network_alpha, rank, dtype, lora_parameters):
808
+ linear_layer = model.regular_linear_layer if isinstance(model, PatchedLoraProjection) else model
809
+ ctx = init_empty_weights if low_cpu_mem_usage else nullcontext
810
+ with ctx():
811
+ model = PatchedLoraProjection(linear_layer, lora_scale, network_alpha, rank, dtype=dtype)
812
+
813
+ lora_parameters.extend(model.lora_linear_layer.parameters())
814
+ return model
815
+
816
+ # First, remove any monkey-patch that might have been applied before
817
+ cls._remove_text_encoder_monkey_patch_classmethod(text_encoder)
818
+
819
+ lora_parameters = []
820
+ network_alphas = {} if network_alphas is None else network_alphas
821
+ is_network_alphas_populated = len(network_alphas) > 0
822
+
823
+ for name, attn_module in text_encoder_attn_modules(text_encoder):
824
+ query_alpha = network_alphas.pop(name + ".to_q_lora.down.weight.alpha", None)
825
+ key_alpha = network_alphas.pop(name + ".to_k_lora.down.weight.alpha", None)
826
+ value_alpha = network_alphas.pop(name + ".to_v_lora.down.weight.alpha", None)
827
+ out_alpha = network_alphas.pop(name + ".to_out_lora.down.weight.alpha", None)
828
+
829
+ if isinstance(rank, dict):
830
+ current_rank = rank.pop(f"{name}.out_proj.lora_linear_layer.up.weight")
831
+ else:
832
+ current_rank = rank
833
+
834
+ attn_module.q_proj = create_patched_linear_lora(
835
+ attn_module.q_proj, query_alpha, current_rank, dtype, lora_parameters
836
+ )
837
+ attn_module.k_proj = create_patched_linear_lora(
838
+ attn_module.k_proj, key_alpha, current_rank, dtype, lora_parameters
839
+ )
840
+ attn_module.v_proj = create_patched_linear_lora(
841
+ attn_module.v_proj, value_alpha, current_rank, dtype, lora_parameters
842
+ )
843
+ attn_module.out_proj = create_patched_linear_lora(
844
+ attn_module.out_proj, out_alpha, current_rank, dtype, lora_parameters
845
+ )
846
+
847
+ if patch_mlp:
848
+ for name, mlp_module in text_encoder_mlp_modules(text_encoder):
849
+ fc1_alpha = network_alphas.pop(name + ".fc1.lora_linear_layer.down.weight.alpha", None)
850
+ fc2_alpha = network_alphas.pop(name + ".fc2.lora_linear_layer.down.weight.alpha", None)
851
+
852
+ current_rank_fc1 = rank.pop(f"{name}.fc1.lora_linear_layer.up.weight")
853
+ current_rank_fc2 = rank.pop(f"{name}.fc2.lora_linear_layer.up.weight")
854
+
855
+ mlp_module.fc1 = create_patched_linear_lora(
856
+ mlp_module.fc1, fc1_alpha, current_rank_fc1, dtype, lora_parameters
857
+ )
858
+ mlp_module.fc2 = create_patched_linear_lora(
859
+ mlp_module.fc2, fc2_alpha, current_rank_fc2, dtype, lora_parameters
860
+ )
861
+
862
+ if is_network_alphas_populated and len(network_alphas) > 0:
863
+ raise ValueError(
864
+ f"The `network_alphas` has to be empty at this point but has the following keys \n\n {', '.join(network_alphas.keys())}"
865
+ )
866
+
867
+ return lora_parameters
868
+
869
+ @classmethod
870
+ def save_lora_weights(
871
+ cls,
872
+ save_directory: Union[str, os.PathLike],
873
+ unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
874
+ text_encoder_lora_layers: Dict[str, torch.nn.Module] = None,
875
+ transformer_lora_layers: Dict[str, torch.nn.Module] = None,
876
+ is_main_process: bool = True,
877
+ weight_name: str = None,
878
+ save_function: Callable = None,
879
+ safe_serialization: bool = True,
880
+ ):
881
+ r"""
882
+ Save the LoRA parameters corresponding to the UNet and text encoder.
883
+
884
+ Arguments:
885
+ save_directory (`str` or `os.PathLike`):
886
+ Directory to save LoRA parameters to. Will be created if it doesn't exist.
887
+ unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
888
+ State dict of the LoRA layers corresponding to the `unet`.
889
+ text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
890
+ State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
891
+ encoder LoRA state dict because it comes from 🤗 Transformers.
892
+ is_main_process (`bool`, *optional*, defaults to `True`):
893
+ Whether the process calling this is the main process or not. Useful during distributed training when you
894
+ need to call this function on all processes. In this case, set `is_main_process=True` only on the main
895
+ process to avoid race conditions.
896
+ save_function (`Callable`):
897
+ The function to use to save the state dictionary. Useful during distributed training when you need to
898
+ replace `torch.save` with another method. Can be configured with the environment variable
899
+ `DIFFUSERS_SAVE_MODE`.
900
+ safe_serialization (`bool`, *optional*, defaults to `True`):
901
+ Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
902
+ """
903
+ state_dict = {}
904
+
905
+ def pack_weights(layers, prefix):
906
+ layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
907
+ layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
908
+ return layers_state_dict
909
+
910
+ if not (unet_lora_layers or text_encoder_lora_layers or transformer_lora_layers):
911
+ raise ValueError(
912
+ "You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers`, or `transformer_lora_layers`."
913
+ )
914
+
915
+ if unet_lora_layers:
916
+ state_dict.update(pack_weights(unet_lora_layers, cls.unet_name))
917
+
918
+ if text_encoder_lora_layers:
919
+ state_dict.update(pack_weights(text_encoder_lora_layers, cls.text_encoder_name))
920
+
921
+ if transformer_lora_layers:
922
+ state_dict.update(pack_weights(transformer_lora_layers, "transformer"))
923
+
924
+ # Save the model
925
+ cls.write_lora_layers(
926
+ state_dict=state_dict,
927
+ save_directory=save_directory,
928
+ is_main_process=is_main_process,
929
+ weight_name=weight_name,
930
+ save_function=save_function,
931
+ safe_serialization=safe_serialization,
932
+ )
933
+
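For orientation, here is a minimal, hedged usage sketch of the `save_lora_weights` path above. The output directory and the toy state dict are placeholders; in practice the layer dicts come from a LoRA training loop.

```python
import torch
from diffusers import StableDiffusionPipeline

# Toy {parameter name: tensor} dict standing in for trained UNet LoRA weights.
unet_lora_layers = {"down_blocks.0.attentions.0.to_q_lora.up.weight": torch.zeros(4, 320)}

# `save_lora_weights` is a classmethod, so no model download is needed just to save.
StableDiffusionPipeline.save_lora_weights(
    save_directory="./my-lora",         # created if it does not exist
    unet_lora_layers=unet_lora_layers,  # packed under the "unet." prefix
    safe_serialization=True,            # writes a .safetensors file
)
```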
934
+ @staticmethod
935
+ def write_lora_layers(
936
+ state_dict: Dict[str, torch.Tensor],
937
+ save_directory: str,
938
+ is_main_process: bool,
939
+ weight_name: str,
940
+ save_function: Callable,
941
+ safe_serialization: bool,
942
+ ):
943
+ if os.path.isfile(save_directory):
944
+ logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
945
+ return
946
+
947
+ if save_function is None:
948
+ if safe_serialization:
949
+
950
+ def save_function(weights, filename):
951
+ return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"})
952
+
953
+ else:
954
+ save_function = torch.save
955
+
956
+ os.makedirs(save_directory, exist_ok=True)
957
+
958
+ if weight_name is None:
959
+ if safe_serialization:
960
+ weight_name = LORA_WEIGHT_NAME_SAFE
961
+ else:
962
+ weight_name = LORA_WEIGHT_NAME
963
+
964
+ save_path = Path(save_directory, weight_name).as_posix()
965
+ save_function(state_dict, save_path)
966
+ logger.info(f"Model weights saved in {save_path}")
967
+
968
+ def unload_lora_weights(self):
969
+ """
970
+ Unloads the LoRA parameters.
971
+
972
+ Examples:
973
+
974
+ ```python
975
+ >>> # Assuming `pipeline` is already loaded with the LoRA parameters.
976
+ >>> pipeline.unload_lora_weights()
977
+ >>> ...
978
+ ```
979
+ """
980
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
981
+
982
+ if not USE_PEFT_BACKEND:
983
+ if version.parse(__version__) > version.parse("0.23"):
984
+ logger.warning(
985
+ "You are using `unload_lora_weights` to disable and unload lora weights. If you want to iteratively enable and disable adapter weights,"
986
+ "you can use `pipe.enable_lora()` or `pipe.disable_lora()` after installing the latest version of PEFT."
987
+ )
988
+
989
+ for _, module in unet.named_modules():
990
+ if hasattr(module, "set_lora_layer"):
991
+ module.set_lora_layer(None)
992
+ else:
993
+ recurse_remove_peft_layers(unet)
994
+ if hasattr(unet, "peft_config"):
995
+ del unet.peft_config
996
+
997
+ # Safe to call the following regardless of LoRA.
998
+ self._remove_text_encoder_monkey_patch()
999
+
1000
+ def fuse_lora(
1001
+ self,
1002
+ fuse_unet: bool = True,
1003
+ fuse_text_encoder: bool = True,
1004
+ lora_scale: float = 1.0,
1005
+ safe_fusing: bool = False,
1006
+ adapter_names: Optional[List[str]] = None,
1007
+ ):
1008
+ r"""
1009
+ Fuses the LoRA parameters into the original parameters of the corresponding blocks.
1010
+
1011
+ <Tip warning={true}>
1012
+
1013
+ This is an experimental API.
1014
+
1015
+ </Tip>
1016
+
1017
+ Args:
1018
+ fuse_unet (`bool`, defaults to `True`): Whether to fuse the UNet LoRA parameters.
1019
+ fuse_text_encoder (`bool`, defaults to `True`):
1020
+ Whether to fuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
1021
+ LoRA parameters then it won't have any effect.
1022
+ lora_scale (`float`, defaults to 1.0):
1023
+ Controls how much to influence the outputs with the LoRA parameters.
1024
+ safe_fusing (`bool`, defaults to `False`):
1025
+ Whether to check the fused weights for NaN values before fusing and, if values are NaN, to skip fusing them.
1026
+ adapter_names (`List[str]`, *optional*):
1027
+ Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
1028
+
1029
+ Example:
1030
+
1031
+ ```py
1032
+ from diffusers import DiffusionPipeline
1033
+ import torch
1034
+
1035
+ pipeline = DiffusionPipeline.from_pretrained(
1036
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
1037
+ ).to("cuda")
1038
+ pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
1039
+ pipeline.fuse_lora(lora_scale=0.7)
1040
+ ```
1041
+ """
1042
+ if fuse_unet or fuse_text_encoder:
1043
+ self.num_fused_loras += 1
1044
+ if self.num_fused_loras > 1:
1045
+ logger.warning(
1046
+ "The current API is supported for operating with a single LoRA file. You are trying to load and fuse more than one LoRA which is not well-supported.",
1047
+ )
1048
+
1049
+ if fuse_unet:
1050
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1051
+ unet.fuse_lora(lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names)
1052
+
1053
+ if USE_PEFT_BACKEND:
1054
+ from peft.tuners.tuners_utils import BaseTunerLayer
1055
+
1056
+ def fuse_text_encoder_lora(text_encoder, lora_scale=1.0, safe_fusing=False, adapter_names=None):
1057
+ merge_kwargs = {"safe_merge": safe_fusing}
1058
+
1059
+ for module in text_encoder.modules():
1060
+ if isinstance(module, BaseTunerLayer):
1061
+ if lora_scale != 1.0:
1062
+ module.scale_layer(lora_scale)
1063
+
1064
+ # For BC with previous PEFT versions, we need to check the signature
1065
+ # of the `merge` method to see if it supports the `adapter_names` argument.
1066
+ supported_merge_kwargs = list(inspect.signature(module.merge).parameters)
1067
+ if "adapter_names" in supported_merge_kwargs:
1068
+ merge_kwargs["adapter_names"] = adapter_names
1069
+ elif "adapter_names" not in supported_merge_kwargs and adapter_names is not None:
1070
+ raise ValueError(
1071
+ "The `adapter_names` argument is not supported with your PEFT version. "
1072
+ "Please upgrade to the latest version of PEFT. `pip install -U peft`"
1073
+ )
1074
+
1075
+ module.merge(**merge_kwargs)
1076
+
1077
+ else:
1078
+ deprecate("fuse_text_encoder_lora", "0.27", LORA_DEPRECATION_MESSAGE)
1079
+
1080
+ def fuse_text_encoder_lora(text_encoder, lora_scale=1.0, safe_fusing=False, **kwargs):
1081
+ if "adapter_names" in kwargs and kwargs["adapter_names"] is not None:
1082
+ raise ValueError(
1083
+ "The `adapter_names` argument is not supported in your environment. Please switch to PEFT "
1084
+ "backend to use this argument by installing latest PEFT and transformers."
1085
+ " `pip install -U peft transformers`"
1086
+ )
1087
+
1088
+ for _, attn_module in text_encoder_attn_modules(text_encoder):
1089
+ if isinstance(attn_module.q_proj, PatchedLoraProjection):
1090
+ attn_module.q_proj._fuse_lora(lora_scale, safe_fusing)
1091
+ attn_module.k_proj._fuse_lora(lora_scale, safe_fusing)
1092
+ attn_module.v_proj._fuse_lora(lora_scale, safe_fusing)
1093
+ attn_module.out_proj._fuse_lora(lora_scale, safe_fusing)
1094
+
1095
+ for _, mlp_module in text_encoder_mlp_modules(text_encoder):
1096
+ if isinstance(mlp_module.fc1, PatchedLoraProjection):
1097
+ mlp_module.fc1._fuse_lora(lora_scale, safe_fusing)
1098
+ mlp_module.fc2._fuse_lora(lora_scale, safe_fusing)
1099
+
1100
+ if fuse_text_encoder:
1101
+ if hasattr(self, "text_encoder"):
1102
+ fuse_text_encoder_lora(self.text_encoder, lora_scale, safe_fusing, adapter_names=adapter_names)
1103
+ if hasattr(self, "text_encoder_2"):
1104
+ fuse_text_encoder_lora(self.text_encoder_2, lora_scale, safe_fusing, adapter_names=adapter_names)
1105
+
1106
+ def unfuse_lora(self, unfuse_unet: bool = True, unfuse_text_encoder: bool = True):
1107
+ r"""
1108
+ Reverses the effect of
1109
+ [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.fuse_lora).
1110
+
1111
+ <Tip warning={true}>
1112
+
1113
+ This is an experimental API.
1114
+
1115
+ </Tip>
1116
+
1117
+ Args:
1118
+ unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
1119
+ unfuse_text_encoder (`bool`, defaults to `True`):
1120
+ Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
1121
+ LoRA parameters then it won't have any effect.
1122
+ """
1123
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1124
+ if unfuse_unet:
1125
+ if not USE_PEFT_BACKEND:
1126
+ unet.unfuse_lora()
1127
+ else:
1128
+ from peft.tuners.tuners_utils import BaseTunerLayer
1129
+
1130
+ for module in unet.modules():
1131
+ if isinstance(module, BaseTunerLayer):
1132
+ module.unmerge()
1133
+
1134
+ if USE_PEFT_BACKEND:
1135
+ from peft.tuners.tuners_utils import BaseTunerLayer
1136
+
1137
+ def unfuse_text_encoder_lora(text_encoder):
1138
+ for module in text_encoder.modules():
1139
+ if isinstance(module, BaseTunerLayer):
1140
+ module.unmerge()
1141
+
1142
+ else:
1143
+ deprecate("unfuse_text_encoder_lora", "0.27", LORA_DEPRECATION_MESSAGE)
1144
+
1145
+ def unfuse_text_encoder_lora(text_encoder):
1146
+ for _, attn_module in text_encoder_attn_modules(text_encoder):
1147
+ if isinstance(attn_module.q_proj, PatchedLoraProjection):
1148
+ attn_module.q_proj._unfuse_lora()
1149
+ attn_module.k_proj._unfuse_lora()
1150
+ attn_module.v_proj._unfuse_lora()
1151
+ attn_module.out_proj._unfuse_lora()
1152
+
1153
+ for _, mlp_module in text_encoder_mlp_modules(text_encoder):
1154
+ if isinstance(mlp_module.fc1, PatchedLoraProjection):
1155
+ mlp_module.fc1._unfuse_lora()
1156
+ mlp_module.fc2._unfuse_lora()
1157
+
1158
+ if unfuse_text_encoder:
1159
+ if hasattr(self, "text_encoder"):
1160
+ unfuse_text_encoder_lora(self.text_encoder)
1161
+ if hasattr(self, "text_encoder_2"):
1162
+ unfuse_text_encoder_lora(self.text_encoder_2)
1163
+
1164
+ self.num_fused_loras -= 1
1165
+
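Taken together with `fuse_lora` above, a typical fuse-for-speed / unfuse-to-restore cycle looks roughly like the sketch below. The model and LoRA repo ids reuse the illustrative ones from the `fuse_lora` docstring; the prompt is made up.

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors")

# Bake the LoRA into the base weights to remove the per-step LoRA overhead ...
pipe.fuse_lora(lora_scale=0.7)
image = pipe("pixel art of a corgi astronaut").images[0]

# ... and restore the original weights once the LoRA is no longer wanted.
pipe.unfuse_lora()
```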
1166
+ def set_adapters_for_text_encoder(
1167
+ self,
1168
+ adapter_names: Union[List[str], str],
1169
+ text_encoder: Optional["PreTrainedModel"] = None, # noqa: F821
1170
+ text_encoder_weights: List[float] = None,
1171
+ ):
1172
+ """
1173
+ Sets the adapter layers for the text encoder.
1174
+
1175
+ Args:
1176
+ adapter_names (`List[str]` or `str`):
1177
+ The names of the adapters to use.
1178
+ text_encoder (`torch.nn.Module`, *optional*):
1179
+ The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder`
1180
+ attribute.
1181
+ text_encoder_weights (`List[float]`, *optional*):
1182
+ The weights to use for the text encoder. If `None`, the weights are set to `1.0` for all the adapters.
1183
+ """
1184
+ if not USE_PEFT_BACKEND:
1185
+ raise ValueError("PEFT backend is required for this method.")
1186
+
1187
+ def process_weights(adapter_names, weights):
1188
+ if weights is None:
1189
+ weights = [1.0] * len(adapter_names)
1190
+ elif isinstance(weights, float):
1191
+ weights = [weights]
1192
+
1193
+ if len(adapter_names) != len(weights):
1194
+ raise ValueError(
1195
+ f"Length of adapter names {len(adapter_names)} is not equal to the length of the weights {len(weights)}"
1196
+ )
1197
+ return weights
1198
+
1199
+ adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names
1200
+ text_encoder_weights = process_weights(adapter_names, text_encoder_weights)
1201
+ text_encoder = text_encoder or getattr(self, "text_encoder", None)
1202
+ if text_encoder is None:
1203
+ raise ValueError(
1204
+ "The pipeline does not have a default `pipe.text_encoder` attribute. Please make sure to pass a `text_encoder` instead."
1205
+ )
1206
+ set_weights_and_activate_adapters(text_encoder, adapter_names, text_encoder_weights)
1207
+
1208
+ def disable_lora_for_text_encoder(self, text_encoder: Optional["PreTrainedModel"] = None):
1209
+ """
1210
+ Disables the LoRA layers for the text encoder.
1211
+
1212
+ Args:
1213
+ text_encoder (`torch.nn.Module`, *optional*):
1214
+ The text encoder module to disable the LoRA layers for. If `None`, it will try to get the
1215
+ `text_encoder` attribute.
1216
+ """
1217
+ if not USE_PEFT_BACKEND:
1218
+ raise ValueError("PEFT backend is required for this method.")
1219
+
1220
+ text_encoder = text_encoder or getattr(self, "text_encoder", None)
1221
+ if text_encoder is None:
1222
+ raise ValueError("Text Encoder not found.")
1223
+ set_adapter_layers(text_encoder, enabled=False)
1224
+
1225
+ def enable_lora_for_text_encoder(self, text_encoder: Optional["PreTrainedModel"] = None):
1226
+ """
1227
+ Enables the LoRA layers for the text encoder.
1228
+
1229
+ Args:
1230
+ text_encoder (`torch.nn.Module`, *optional*):
1231
+ The text encoder module to enable the LoRA layers for. If `None`, it will try to get the `text_encoder`
1232
+ attribute.
1233
+ """
1234
+ if not USE_PEFT_BACKEND:
1235
+ raise ValueError("PEFT backend is required for this method.")
1236
+ text_encoder = text_encoder or getattr(self, "text_encoder", None)
1237
+ if text_encoder is None:
1238
+ raise ValueError("Text Encoder not found.")
1239
+ set_adapter_layers(text_encoder, enabled=True)
1240
+
1241
+ def set_adapters(
1242
+ self,
1243
+ adapter_names: Union[List[str], str],
1244
+ adapter_weights: Optional[List[float]] = None,
1245
+ ):
1246
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1247
+ # Handle the UNET
1248
+ unet.set_adapters(adapter_names, adapter_weights)
1249
+
1250
+ # Handle the Text Encoder
1251
+ if hasattr(self, "text_encoder"):
1252
+ self.set_adapters_for_text_encoder(adapter_names, self.text_encoder, adapter_weights)
1253
+ if hasattr(self, "text_encoder_2"):
1254
+ self.set_adapters_for_text_encoder(adapter_names, self.text_encoder_2, adapter_weights)
1255
+
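A hedged sketch of multi-adapter weighting with `set_adapters`: the two LoRA repos are the same illustrative ones that appear in the docstring examples elsewhere in this file.

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy")
pipe.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")

# Blend the two adapters; the weights are applied to the UNet and both text encoders.
pipe.set_adapters(["toy", "pixel"], adapter_weights=[1.0, 0.5])
```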
1256
+ def disable_lora(self):
1257
+ if not USE_PEFT_BACKEND:
1258
+ raise ValueError("PEFT backend is required for this method.")
1259
+
1260
+ # Disable unet adapters
1261
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1262
+ unet.disable_lora()
1263
+
1264
+ # Disable text encoder adapters
1265
+ if hasattr(self, "text_encoder"):
1266
+ self.disable_lora_for_text_encoder(self.text_encoder)
1267
+ if hasattr(self, "text_encoder_2"):
1268
+ self.disable_lora_for_text_encoder(self.text_encoder_2)
1269
+
1270
+ def enable_lora(self):
1271
+ if not USE_PEFT_BACKEND:
1272
+ raise ValueError("PEFT backend is required for this method.")
1273
+
1274
+ # Enable unet adapters
1275
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1276
+ unet.enable_lora()
1277
+
1278
+ # Enable text encoder adapters
1279
+ if hasattr(self, "text_encoder"):
1280
+ self.enable_lora_for_text_encoder(self.text_encoder)
1281
+ if hasattr(self, "text_encoder_2"):
1282
+ self.enable_lora_for_text_encoder(self.text_encoder_2)
1283
+
1284
+ def delete_adapters(self, adapter_names: Union[List[str], str]):
1285
+ """
1286
+ Args:
1287
+ Deletes the LoRA layers of `adapter_name` for the unet and text-encoder(s).
1288
+ adapter_names (`Union[List[str], str]`):
1289
+ The names of the adapter to delete. Can be a single string or a list of strings
1290
+ """
1291
+ if not USE_PEFT_BACKEND:
1292
+ raise ValueError("PEFT backend is required for this method.")
1293
+
1294
+ if isinstance(adapter_names, str):
1295
+ adapter_names = [adapter_names]
1296
+
1297
+ # Delete unet adapters
1298
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1299
+ unet.delete_adapters(adapter_names)
1300
+
1301
+ for adapter_name in adapter_names:
1302
+ # Delete text encoder adapters
1303
+ if hasattr(self, "text_encoder"):
1304
+ delete_adapter_layers(self.text_encoder, adapter_name)
1305
+ if hasattr(self, "text_encoder_2"):
1306
+ delete_adapter_layers(self.text_encoder_2, adapter_name)
1307
+
1308
+ def get_active_adapters(self) -> List[str]:
1309
+ """
1310
+ Gets the list of the current active adapters.
1311
+
1312
+ Example:
1313
+
1314
+ ```python
1315
+ from diffusers import DiffusionPipeline
1316
+
1317
+ pipeline = DiffusionPipeline.from_pretrained(
1318
+ "stabilityai/stable-diffusion-xl-base-1.0",
1319
+ ).to("cuda")
1320
+ pipeline.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy")
1321
+ pipeline.get_active_adapters()
1322
+ ```
1323
+ """
1324
+ if not USE_PEFT_BACKEND:
1325
+ raise ValueError(
1326
+ "PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`"
1327
+ )
1328
+
1329
+ from peft.tuners.tuners_utils import BaseTunerLayer
1330
+
1331
+ active_adapters = []
1332
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1333
+ for module in unet.modules():
1334
+ if isinstance(module, BaseTunerLayer):
1335
+ active_adapters = module.active_adapters
1336
+ break
1337
+
1338
+ return active_adapters
1339
+
1340
+ def get_list_adapters(self) -> Dict[str, List[str]]:
1341
+ """
1342
+ Gets the current list of all available adapters in the pipeline.
1343
+ """
1344
+ if not USE_PEFT_BACKEND:
1345
+ raise ValueError(
1346
+ "PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`"
1347
+ )
1348
+
1349
+ set_adapters = {}
1350
+
1351
+ if hasattr(self, "text_encoder") and hasattr(self.text_encoder, "peft_config"):
1352
+ set_adapters["text_encoder"] = list(self.text_encoder.peft_config.keys())
1353
+
1354
+ if hasattr(self, "text_encoder_2") and hasattr(self.text_encoder_2, "peft_config"):
1355
+ set_adapters["text_encoder_2"] = list(self.text_encoder_2.peft_config.keys())
1356
+
1357
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1358
+ if hasattr(self, self.unet_name) and hasattr(unet, "peft_config"):
1359
+ set_adapters[self.unet_name] = list(self.unet.peft_config.keys())
1360
+
1361
+ return set_adapters
1362
+
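To make the difference between the two getters concrete, a hedged sketch of what they return, continuing from the SDXL pipeline set up in the docstring example above. The exact keys depend on which components the checkpoint actually targets.

```python
# Assumes `pipe` is the SDXL pipeline from the docstring example, with one adapter loaded.
pipe.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy")

pipe.get_active_adapters()
# e.g. ["toy"]  -- the adapters currently active on the UNet

pipe.get_list_adapters()
# e.g. {"unet": ["toy"], "text_encoder": ["toy"], "text_encoder_2": ["toy"]}
# -- everything attached to each component, whether active or not
```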
1363
+ def set_lora_device(self, adapter_names: List[str], device: Union[torch.device, str, int]) -> None:
1364
+ """
1365
+ Moves the LoRAs listed in `adapter_names` to a target device. Useful for offloading the LoRA to the CPU in case
1366
+ you want to load multiple adapters and free some GPU memory.
1367
+
1368
+ Args:
1369
+ adapter_names (`List[str]`):
1370
+ List of adapters to send to the device.
1371
+ device (`Union[torch.device, str, int]`):
1372
+ Device to send the adapters to. Can be either a torch device, a str or an integer.
1373
+ """
1374
+ if not USE_PEFT_BACKEND:
1375
+ raise ValueError("PEFT backend is required for this method.")
1376
+
1377
+ from peft.tuners.tuners_utils import BaseTunerLayer
1378
+
1379
+ # Handle the UNET
1380
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
1381
+ for unet_module in unet.modules():
1382
+ if isinstance(unet_module, BaseTunerLayer):
1383
+ for adapter_name in adapter_names:
1384
+ unet_module.lora_A[adapter_name].to(device)
1385
+ unet_module.lora_B[adapter_name].to(device)
1386
+
1387
+ # Handle the text encoder
1388
+ modules_to_process = []
1389
+ if hasattr(self, "text_encoder"):
1390
+ modules_to_process.append(self.text_encoder)
1391
+
1392
+ if hasattr(self, "text_encoder_2"):
1393
+ modules_to_process.append(self.text_encoder_2)
1394
+
1395
+ for text_encoder in modules_to_process:
1396
+ # loop over submodules
1397
+ for text_encoder_module in text_encoder.modules():
1398
+ if isinstance(text_encoder_module, BaseTunerLayer):
1399
+ for adapter_name in adapter_names:
1400
+ text_encoder_module.lora_A[adapter_name].to(device)
1401
+ text_encoder_module.lora_B[adapter_name].to(device)
1402
+
1403
+
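A hedged sketch of `set_lora_device` for juggling GPU memory between the two adapters loaded in the earlier multi-adapter sketch (device names are illustrative).

```python
# Keep only the adapter currently in use on the GPU; park the other one on the CPU.
pipe.set_lora_device(adapter_names=["pixel"], device="cuda")
pipe.set_lora_device(adapter_names=["toy"], device="cpu")

# Before activating "toy" again, move it back onto the GPU.
pipe.set_lora_device(adapter_names=["toy"], device="cuda")
```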
1404
+ class StableDiffusionXLLoraLoaderMixin(LoraLoaderMixin):
1405
+ """This class overrides `LoraLoaderMixin` with LoRA loading/saving code that's specific to SDXL"""
1406
+
1407
+ # Override to properly handle the loading and unloading of the additional text encoder.
1408
+ def load_lora_weights(
1409
+ self,
1410
+ pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
1411
+ adapter_name: Optional[str] = None,
1412
+ **kwargs,
1413
+ ):
1414
+ """
1415
+ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
1416
+ `self.text_encoder`.
1417
+
1418
+ All kwargs are forwarded to `self.lora_state_dict`.
1419
+
1420
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
1421
+
1422
+ See [`~loaders.LoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into
1423
+ `self.unet`.
1424
+
1425
+ See [`~loaders.LoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded
1426
+ into `self.text_encoder`.
1427
+
1428
+ Parameters:
1429
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
1430
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`].
1431
+ adapter_name (`str`, *optional*):
1432
+ Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
1433
+ `default_{i}` where i is the total number of adapters being loaded.
1434
+ kwargs (`dict`, *optional*):
1435
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`].
1436
+ """
1437
+ # We could have accessed the unet config from `lora_state_dict()` too. We pass
1438
+ # it here explicitly to be able to tell that it's coming from an SDXL
1439
+ # pipeline.
1440
+
1441
+ # First, ensure that the checkpoint is a compatible one and can be successfully loaded.
1442
+ state_dict, network_alphas = self.lora_state_dict(
1443
+ pretrained_model_name_or_path_or_dict,
1444
+ unet_config=self.unet.config,
1445
+ **kwargs,
1446
+ )
1447
+ is_correct_format = all("lora" in key for key in state_dict.keys())
1448
+ if not is_correct_format:
1449
+ raise ValueError("Invalid LoRA checkpoint.")
1450
+
1451
+ self.load_lora_into_unet(
1452
+ state_dict, network_alphas=network_alphas, unet=self.unet, adapter_name=adapter_name, _pipeline=self
1453
+ )
1454
+ text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
1455
+ if len(text_encoder_state_dict) > 0:
1456
+ self.load_lora_into_text_encoder(
1457
+ text_encoder_state_dict,
1458
+ network_alphas=network_alphas,
1459
+ text_encoder=self.text_encoder,
1460
+ prefix="text_encoder",
1461
+ lora_scale=self.lora_scale,
1462
+ adapter_name=adapter_name,
1463
+ _pipeline=self,
1464
+ )
1465
+
1466
+ text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
1467
+ if len(text_encoder_2_state_dict) > 0:
1468
+ self.load_lora_into_text_encoder(
1469
+ text_encoder_2_state_dict,
1470
+ network_alphas=network_alphas,
1471
+ text_encoder=self.text_encoder_2,
1472
+ prefix="text_encoder_2",
1473
+ lora_scale=self.lora_scale,
1474
+ adapter_name=adapter_name,
1475
+ _pipeline=self,
1476
+ )
1477
+
1478
+ @classmethod
1479
+ def save_lora_weights(
1480
+ cls,
1481
+ save_directory: Union[str, os.PathLike],
1482
+ unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1483
+ text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1484
+ text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1485
+ is_main_process: bool = True,
1486
+ weight_name: str = None,
1487
+ save_function: Callable = None,
1488
+ safe_serialization: bool = True,
1489
+ ):
1490
+ r"""
1491
+ Save the LoRA parameters corresponding to the UNet and text encoder.
1492
+
1493
+ Arguments:
1494
+ save_directory (`str` or `os.PathLike`):
1495
+ Directory to save LoRA parameters to. Will be created if it doesn't exist.
1496
+ unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
1497
+ State dict of the LoRA layers corresponding to the `unet`.
1498
+ text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
1499
+ State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
1500
+ encoder LoRA state dict because it comes from 🤗 Transformers.
1501
+ is_main_process (`bool`, *optional*, defaults to `True`):
1502
+ Whether the process calling this is the main process or not. Useful during distributed training when you
1503
+ need to call this function on all processes. In this case, set `is_main_process=True` only on the main
1504
+ process to avoid race conditions.
1505
+ save_function (`Callable`):
1506
+ The function to use to save the state dictionary. Useful during distributed training when you need to
1507
+ replace `torch.save` with another method. Can be configured with the environment variable
1508
+ `DIFFUSERS_SAVE_MODE`.
1509
+ safe_serialization (`bool`, *optional*, defaults to `True`):
1510
+ Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
1511
+ """
1512
+ state_dict = {}
1513
+
1514
+ def pack_weights(layers, prefix):
1515
+ layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
1516
+ layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
1517
+ return layers_state_dict
1518
+
1519
+ if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers):
1520
+ raise ValueError(
1521
+ "You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`."
1522
+ )
1523
+
1524
+ if unet_lora_layers:
1525
+ state_dict.update(pack_weights(unet_lora_layers, "unet"))
1526
+
1527
+ if text_encoder_lora_layers and text_encoder_2_lora_layers:
1528
+ state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
1529
+ state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
1530
+
1531
+ cls.write_lora_layers(
1532
+ state_dict=state_dict,
1533
+ save_directory=save_directory,
1534
+ is_main_process=is_main_process,
1535
+ weight_name=weight_name,
1536
+ save_function=save_function,
1537
+ safe_serialization=safe_serialization,
1538
+ )
1539
+
1540
+ def _remove_text_encoder_monkey_patch(self):
1541
+ if USE_PEFT_BACKEND:
1542
+ recurse_remove_peft_layers(self.text_encoder)
1543
+ # TODO: @younesbelkada handle this in transformers side
1544
+ if getattr(self.text_encoder, "peft_config", None) is not None:
1545
+ del self.text_encoder.peft_config
1546
+ self.text_encoder._hf_peft_config_loaded = None
1547
+
1548
+ recurse_remove_peft_layers(self.text_encoder_2)
1549
+ if getattr(self.text_encoder_2, "peft_config", None) is not None:
1550
+ del self.text_encoder_2.peft_config
1551
+ self.text_encoder_2._hf_peft_config_loaded = None
1552
+ else:
1553
+ self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
1554
+ self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
diffusers/loaders/lora_conversion_utils.py ADDED
@@ -0,0 +1,284 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import re
16
+
17
+ from ..utils import logging
18
+
19
+
20
+ logger = logging.get_logger(__name__)
21
+
22
+
23
+ def _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config, delimiter="_", block_slice_pos=5):
24
+ # 1. get all state_dict_keys
25
+ all_keys = list(state_dict.keys())
26
+ sgm_patterns = ["input_blocks", "middle_block", "output_blocks"]
27
+
28
+ # 2. check if needs remapping, if not return original dict
29
+ is_in_sgm_format = False
30
+ for key in all_keys:
31
+ if any(p in key for p in sgm_patterns):
32
+ is_in_sgm_format = True
33
+ break
34
+
35
+ if not is_in_sgm_format:
36
+ return state_dict
37
+
38
+ # 3. Else remap from SGM patterns
39
+ new_state_dict = {}
40
+ inner_block_map = ["resnets", "attentions", "upsamplers"]
41
+
42
+ # Retrieves # of down, mid and up blocks
43
+ input_block_ids, middle_block_ids, output_block_ids = set(), set(), set()
44
+
45
+ for layer in all_keys:
46
+ if "text" in layer:
47
+ new_state_dict[layer] = state_dict.pop(layer)
48
+ else:
49
+ layer_id = int(layer.split(delimiter)[:block_slice_pos][-1])
50
+ if sgm_patterns[0] in layer:
51
+ input_block_ids.add(layer_id)
52
+ elif sgm_patterns[1] in layer:
53
+ middle_block_ids.add(layer_id)
54
+ elif sgm_patterns[2] in layer:
55
+ output_block_ids.add(layer_id)
56
+ else:
57
+ raise ValueError(f"Checkpoint not supported because layer {layer} not supported.")
58
+
59
+ input_blocks = {
60
+ layer_id: [key for key in state_dict if f"input_blocks{delimiter}{layer_id}" in key]
61
+ for layer_id in input_block_ids
62
+ }
63
+ middle_blocks = {
64
+ layer_id: [key for key in state_dict if f"middle_block{delimiter}{layer_id}" in key]
65
+ for layer_id in middle_block_ids
66
+ }
67
+ output_blocks = {
68
+ layer_id: [key for key in state_dict if f"output_blocks{delimiter}{layer_id}" in key]
69
+ for layer_id in output_block_ids
70
+ }
71
+
72
+ # Rename keys accordingly
73
+ for i in input_block_ids:
74
+ block_id = (i - 1) // (unet_config.layers_per_block + 1)
75
+ layer_in_block_id = (i - 1) % (unet_config.layers_per_block + 1)
76
+
77
+ for key in input_blocks[i]:
78
+ inner_block_id = int(key.split(delimiter)[block_slice_pos])
79
+ inner_block_key = inner_block_map[inner_block_id] if "op" not in key else "downsamplers"
80
+ inner_layers_in_block = str(layer_in_block_id) if "op" not in key else "0"
81
+ new_key = delimiter.join(
82
+ key.split(delimiter)[: block_slice_pos - 1]
83
+ + [str(block_id), inner_block_key, inner_layers_in_block]
84
+ + key.split(delimiter)[block_slice_pos + 1 :]
85
+ )
86
+ new_state_dict[new_key] = state_dict.pop(key)
87
+
88
+ for i in middle_block_ids:
89
+ key_part = None
90
+ if i == 0:
91
+ key_part = [inner_block_map[0], "0"]
92
+ elif i == 1:
93
+ key_part = [inner_block_map[1], "0"]
94
+ elif i == 2:
95
+ key_part = [inner_block_map[0], "1"]
96
+ else:
97
+ raise ValueError(f"Invalid middle block id {i}.")
98
+
99
+ for key in middle_blocks[i]:
100
+ new_key = delimiter.join(
101
+ key.split(delimiter)[: block_slice_pos - 1] + key_part + key.split(delimiter)[block_slice_pos:]
102
+ )
103
+ new_state_dict[new_key] = state_dict.pop(key)
104
+
105
+ for i in output_block_ids:
106
+ block_id = i // (unet_config.layers_per_block + 1)
107
+ layer_in_block_id = i % (unet_config.layers_per_block + 1)
108
+
109
+ for key in output_blocks[i]:
110
+ inner_block_id = int(key.split(delimiter)[block_slice_pos])
111
+ inner_block_key = inner_block_map[inner_block_id]
112
+ inner_layers_in_block = str(layer_in_block_id) if inner_block_id < 2 else "0"
113
+ new_key = delimiter.join(
114
+ key.split(delimiter)[: block_slice_pos - 1]
115
+ + [str(block_id), inner_block_key, inner_layers_in_block]
116
+ + key.split(delimiter)[block_slice_pos + 1 :]
117
+ )
118
+ new_state_dict[new_key] = state_dict.pop(key)
119
+
120
+ if len(state_dict) > 0:
121
+ raise ValueError("At this point all state dict entries have to be converted.")
122
+
123
+ return new_state_dict
124
+
125
+
126
+ def _convert_kohya_lora_to_diffusers(state_dict, unet_name="unet", text_encoder_name="text_encoder"):
127
+ unet_state_dict = {}
128
+ te_state_dict = {}
129
+ te2_state_dict = {}
130
+ network_alphas = {}
131
+
132
+ # every down weight has a corresponding up weight and potentially an alpha weight
133
+ lora_keys = [k for k in state_dict.keys() if k.endswith("lora_down.weight")]
134
+ for key in lora_keys:
135
+ lora_name = key.split(".")[0]
136
+ lora_name_up = lora_name + ".lora_up.weight"
137
+ lora_name_alpha = lora_name + ".alpha"
138
+
139
+ if lora_name.startswith("lora_unet_"):
140
+ diffusers_name = key.replace("lora_unet_", "").replace("_", ".")
141
+
142
+ if "input.blocks" in diffusers_name:
143
+ diffusers_name = diffusers_name.replace("input.blocks", "down_blocks")
144
+ else:
145
+ diffusers_name = diffusers_name.replace("down.blocks", "down_blocks")
146
+
147
+ if "middle.block" in diffusers_name:
148
+ diffusers_name = diffusers_name.replace("middle.block", "mid_block")
149
+ else:
150
+ diffusers_name = diffusers_name.replace("mid.block", "mid_block")
151
+ if "output.blocks" in diffusers_name:
152
+ diffusers_name = diffusers_name.replace("output.blocks", "up_blocks")
153
+ else:
154
+ diffusers_name = diffusers_name.replace("up.blocks", "up_blocks")
155
+
156
+ diffusers_name = diffusers_name.replace("transformer.blocks", "transformer_blocks")
157
+ diffusers_name = diffusers_name.replace("to.q.lora", "to_q_lora")
158
+ diffusers_name = diffusers_name.replace("to.k.lora", "to_k_lora")
159
+ diffusers_name = diffusers_name.replace("to.v.lora", "to_v_lora")
160
+ diffusers_name = diffusers_name.replace("to.out.0.lora", "to_out_lora")
161
+ diffusers_name = diffusers_name.replace("proj.in", "proj_in")
162
+ diffusers_name = diffusers_name.replace("proj.out", "proj_out")
163
+ diffusers_name = diffusers_name.replace("emb.layers", "time_emb_proj")
164
+
165
+ # SDXL specificity.
166
+ if "emb" in diffusers_name and "time.emb.proj" not in diffusers_name:
167
+ pattern = r"\.\d+(?=\D*$)"
168
+ diffusers_name = re.sub(pattern, "", diffusers_name, count=1)
169
+ if ".in." in diffusers_name:
170
+ diffusers_name = diffusers_name.replace("in.layers.2", "conv1")
171
+ if ".out." in diffusers_name:
172
+ diffusers_name = diffusers_name.replace("out.layers.3", "conv2")
173
+ if "downsamplers" in diffusers_name or "upsamplers" in diffusers_name:
174
+ diffusers_name = diffusers_name.replace("op", "conv")
175
+ if "skip" in diffusers_name:
176
+ diffusers_name = diffusers_name.replace("skip.connection", "conv_shortcut")
177
+
178
+ # LyCORIS specificity.
179
+ if "time.emb.proj" in diffusers_name:
180
+ diffusers_name = diffusers_name.replace("time.emb.proj", "time_emb_proj")
181
+ if "conv.shortcut" in diffusers_name:
182
+ diffusers_name = diffusers_name.replace("conv.shortcut", "conv_shortcut")
183
+
184
+ # General coverage.
185
+ if "transformer_blocks" in diffusers_name:
186
+ if "attn1" in diffusers_name or "attn2" in diffusers_name:
187
+ diffusers_name = diffusers_name.replace("attn1", "attn1.processor")
188
+ diffusers_name = diffusers_name.replace("attn2", "attn2.processor")
189
+ unet_state_dict[diffusers_name] = state_dict.pop(key)
190
+ unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
191
+ elif "ff" in diffusers_name:
192
+ unet_state_dict[diffusers_name] = state_dict.pop(key)
193
+ unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
194
+ elif any(key in diffusers_name for key in ("proj_in", "proj_out")):
195
+ unet_state_dict[diffusers_name] = state_dict.pop(key)
196
+ unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
197
+ else:
198
+ unet_state_dict[diffusers_name] = state_dict.pop(key)
199
+ unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
200
+
201
+ elif lora_name.startswith("lora_te_"):
202
+ diffusers_name = key.replace("lora_te_", "").replace("_", ".")
203
+ diffusers_name = diffusers_name.replace("text.model", "text_model")
204
+ diffusers_name = diffusers_name.replace("self.attn", "self_attn")
205
+ diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
206
+ diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
207
+ diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
208
+ diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
209
+ if "self_attn" in diffusers_name:
210
+ te_state_dict[diffusers_name] = state_dict.pop(key)
211
+ te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
212
+ elif "mlp" in diffusers_name:
213
+ # Be aware that this is the new diffusers convention and the rest of the code might
214
+ # not utilize it yet.
215
+ diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
216
+ te_state_dict[diffusers_name] = state_dict.pop(key)
217
+ te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
218
+
219
+ # (sayakpaul): Duplicate code. Needs to be cleaned.
220
+ elif lora_name.startswith("lora_te1_"):
221
+ diffusers_name = key.replace("lora_te1_", "").replace("_", ".")
222
+ diffusers_name = diffusers_name.replace("text.model", "text_model")
223
+ diffusers_name = diffusers_name.replace("self.attn", "self_attn")
224
+ diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
225
+ diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
226
+ diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
227
+ diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
228
+ if "self_attn" in diffusers_name:
229
+ te_state_dict[diffusers_name] = state_dict.pop(key)
230
+ te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
231
+ elif "mlp" in diffusers_name:
232
+ # Be aware that this is the new diffusers convention and the rest of the code might
233
+ # not utilize it yet.
234
+ diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
235
+ te_state_dict[diffusers_name] = state_dict.pop(key)
236
+ te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
237
+
238
+ # (sayakpaul): Duplicate code. Needs to be cleaned.
239
+ elif lora_name.startswith("lora_te2_"):
240
+ diffusers_name = key.replace("lora_te2_", "").replace("_", ".")
241
+ diffusers_name = diffusers_name.replace("text.model", "text_model")
242
+ diffusers_name = diffusers_name.replace("self.attn", "self_attn")
243
+ diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
244
+ diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
245
+ diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
246
+ diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
247
+ if "self_attn" in diffusers_name:
248
+ te2_state_dict[diffusers_name] = state_dict.pop(key)
249
+ te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
250
+ elif "mlp" in diffusers_name:
251
+ # Be aware that this is the new diffusers convention and the rest of the code might
252
+ # not utilize it yet.
253
+ diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
254
+ te2_state_dict[diffusers_name] = state_dict.pop(key)
255
+ te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
256
+
257
+ # Rename the alphas so that they can be mapped appropriately.
258
+ if lora_name_alpha in state_dict:
259
+ alpha = state_dict.pop(lora_name_alpha).item()
260
+ if lora_name_alpha.startswith("lora_unet_"):
261
+ prefix = "unet."
262
+ elif lora_name_alpha.startswith(("lora_te_", "lora_te1_")):
263
+ prefix = "text_encoder."
264
+ else:
265
+ prefix = "text_encoder_2."
266
+ new_name = prefix + diffusers_name.split(".lora.")[0] + ".alpha"
267
+ network_alphas.update({new_name: alpha})
268
+
269
+ if len(state_dict) > 0:
270
+ raise ValueError(f"The following keys have not been correctly renamed: \n\n {', '.join(state_dict.keys())}")
271
+
272
+ logger.info("Kohya-style checkpoint detected.")
273
+ unet_state_dict = {f"{unet_name}.{module_name}": params for module_name, params in unet_state_dict.items()}
274
+ te_state_dict = {f"{text_encoder_name}.{module_name}": params for module_name, params in te_state_dict.items()}
275
+ te2_state_dict = (
276
+ {f"text_encoder_2.{module_name}": params for module_name, params in te2_state_dict.items()}
277
+ if len(te2_state_dict) > 0
278
+ else None
279
+ )
280
+ if te2_state_dict is not None:
281
+ te_state_dict.update(te2_state_dict)
282
+
283
+ new_state_dict = {**unet_state_dict, **te_state_dict}
284
+ return new_state_dict, network_alphas
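For orientation, a hedged sketch of how these two converters are typically chained when a Kohya-style `.safetensors` LoRA is loaded for an SDXL pipeline. The file name is illustrative, and `lora_state_dict` performs roughly this sequence internally, so end users normally never call the private helpers directly.

```python
import safetensors.torch
from diffusers import StableDiffusionXLPipeline
from diffusers.loaders.lora_conversion_utils import (
    _convert_kohya_lora_to_diffusers,
    _maybe_map_sgm_blocks_to_diffusers,
)

pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
state_dict = safetensors.torch.load_file("pixel-art-xl.safetensors")  # illustrative path

# 1. Remap any SGM-style block names (input_blocks / middle_block / output_blocks).
state_dict = _maybe_map_sgm_blocks_to_diffusers(state_dict, pipe.unet.config)
# 2. Translate the Kohya naming scheme into diffusers names and collect the alphas.
state_dict, network_alphas = _convert_kohya_lora_to_diffusers(state_dict)
```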
diffusers/loaders/peft.py ADDED
@@ -0,0 +1,186 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from typing import List, Union
16
+
17
+ from ..utils import MIN_PEFT_VERSION, check_peft_version, is_peft_available
18
+
19
+
20
+ class PeftAdapterMixin:
21
+ """
22
+ A class containing all functions for loading and using adapter weights that are supported in the PEFT library. For
23
+ more details about adapters and injecting them in a transformer-based model, check out the PEFT [documentation](https://huggingface.co/docs/peft/index).
24
+
25
+ Install the latest version of PEFT, and use this mixin to:
26
+
27
+ - Attach new adapters in the model.
28
+ - Attach multiple adapters and iteratively activate/deactivate them.
29
+ - Activate/deactivate all adapters from the model.
30
+ - Get a list of the active adapters.
31
+ """
32
+
33
+ _hf_peft_config_loaded = False
34
+
35
+ def add_adapter(self, adapter_config, adapter_name: str = "default") -> None:
36
+ r"""
37
+ Adds a new adapter to the current model for training. If no adapter name is passed, a default name is assigned
38
+ to the adapter to follow the convention of the PEFT library.
39
+
40
+ If you are not familiar with adapters and PEFT methods, we invite you to read more about them in the PEFT
41
+ [documentation](https://huggingface.co/docs/peft).
42
+
43
+ Args:
44
+ adapter_config (`[~peft.PeftConfig]`):
45
+ The configuration of the adapter to add; supported adapters are non-prefix tuning and adaption prompt
46
+ methods.
47
+ adapter_name (`str`, *optional*, defaults to `"default"`):
48
+ The name of the adapter to add. If no name is passed, a default name is assigned to the adapter.
49
+ """
50
+ check_peft_version(min_version=MIN_PEFT_VERSION)
51
+
52
+ if not is_peft_available():
53
+ raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.")
54
+
55
+ from peft import PeftConfig, inject_adapter_in_model
56
+
57
+ if not self._hf_peft_config_loaded:
58
+ self._hf_peft_config_loaded = True
59
+ elif adapter_name in self.peft_config:
60
+ raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.")
61
+
62
+ if not isinstance(adapter_config, PeftConfig):
63
+ raise ValueError(
64
+ f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead."
65
+ )
66
+
67
+ # Unlike transformers, here we don't need to retrieve the name_or_path of the unet as the loading logic is
68
+ # handled by the `load_lora_layers` or `LoraLoaderMixin`. Therefore we set it to `None` here.
69
+ adapter_config.base_model_name_or_path = None
70
+ inject_adapter_in_model(adapter_config, self, adapter_name)
71
+ self.set_adapter(adapter_name)
72
+
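A hedged sketch of attaching a fresh, trainable LoRA adapter with `add_adapter`, assuming the model class mixes in `PeftAdapterMixin` (the UNet does in recent diffusers releases). The rank, alpha, and target modules below are illustrative choices, not defaults.

```python
from diffusers import UNet2DConditionModel
from peft import LoraConfig

unet = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet"
)

# Illustrative config: rank-4 LoRA on the attention projections.
config = LoraConfig(
    r=4,
    lora_alpha=4,
    target_modules=["to_q", "to_k", "to_v", "to_out.0"],
    init_lora_weights="gaussian",
)

unet.add_adapter(config, adapter_name="my_lora")
print(unet.active_adapters())  # lists the adapter that was just attached
```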
73
+ def set_adapter(self, adapter_name: Union[str, List[str]]) -> None:
74
+ """
75
+ Sets a specific adapter by forcing the model to only use that adapter and disabling the other adapters.
76
+
77
+ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
78
+ [documentation](https://huggingface.co/docs/peft).
79
+
80
+ Args:
81
+ adapter_name (Union[str, List[str]])):
82
+ The list of adapters to set or the adapter name in the case of a single adapter.
83
+ """
84
+ check_peft_version(min_version=MIN_PEFT_VERSION)
85
+
86
+ if not self._hf_peft_config_loaded:
87
+ raise ValueError("No adapter loaded. Please load an adapter first.")
88
+
89
+ if isinstance(adapter_name, str):
90
+ adapter_name = [adapter_name]
91
+
92
+ missing = set(adapter_name) - set(self.peft_config)
93
+ if len(missing) > 0:
94
+ raise ValueError(
95
+ f"Following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s)."
96
+ f" current loaded adapters are: {list(self.peft_config.keys())}"
97
+ )
98
+
99
+ from peft.tuners.tuners_utils import BaseTunerLayer
100
+
101
+ _adapters_has_been_set = False
102
+
103
+ for _, module in self.named_modules():
104
+ if isinstance(module, BaseTunerLayer):
105
+ if hasattr(module, "set_adapter"):
106
+ module.set_adapter(adapter_name)
107
+ # Previous versions of PEFT do not support multi-adapter inference
108
+ elif not hasattr(module, "set_adapter") and len(adapter_name) != 1:
109
+ raise ValueError(
110
+ "You are trying to set multiple adapters and you have a PEFT version that does not support multi-adapter inference. Please upgrade to the latest version of PEFT."
111
+ " `pip install -U peft` or `pip install -U git+https://github.com/huggingface/peft.git`"
112
+ )
113
+ else:
114
+ module.active_adapter = adapter_name
115
+ _adapters_has_been_set = True
116
+
117
+ if not _adapters_has_been_set:
118
+ raise ValueError(
119
+ "Did not succeed in setting the adapter. Please make sure you are using a model that supports adapters."
120
+ )
121
+
122
+ def disable_adapters(self) -> None:
123
+ r"""
124
+ Disable all adapters attached to the model and fallback to inference with the base model only.
125
+
126
+ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
127
+ [documentation](https://huggingface.co/docs/peft).
128
+ """
129
+ check_peft_version(min_version=MIN_PEFT_VERSION)
130
+
131
+ if not self._hf_peft_config_loaded:
132
+ raise ValueError("No adapter loaded. Please load an adapter first.")
133
+
134
+ from peft.tuners.tuners_utils import BaseTunerLayer
135
+
136
+ for _, module in self.named_modules():
137
+ if isinstance(module, BaseTunerLayer):
138
+ if hasattr(module, "enable_adapters"):
139
+ module.enable_adapters(enabled=False)
140
+ else:
141
+ # support for older PEFT versions
142
+ module.disable_adapters = True
143
+
144
+ def enable_adapters(self) -> None:
145
+ """
146
+ Enable adapters that are attached to the model. The model uses `self.active_adapters()` to retrieve the
147
+ list of adapters to enable.
148
+
149
+ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
150
+ [documentation](https://huggingface.co/docs/peft).
151
+ """
152
+ check_peft_version(min_version=MIN_PEFT_VERSION)
153
+
154
+ if not self._hf_peft_config_loaded:
155
+ raise ValueError("No adapter loaded. Please load an adapter first.")
156
+
157
+ from peft.tuners.tuners_utils import BaseTunerLayer
158
+
159
+ for _, module in self.named_modules():
160
+ if isinstance(module, BaseTunerLayer):
161
+ if hasattr(module, "enable_adapters"):
162
+ module.enable_adapters(enabled=True)
163
+ else:
164
+ # support for older PEFT versions
165
+ module.disable_adapters = False
166
+
167
+ def active_adapters(self) -> List[str]:
168
+ """
169
+ Gets the current list of active adapters of the model.
170
+
171
+ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
172
+ [documentation](https://huggingface.co/docs/peft).
173
+ """
174
+ check_peft_version(min_version=MIN_PEFT_VERSION)
175
+
176
+ if not is_peft_available():
177
+ raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.")
178
+
179
+ if not self._hf_peft_config_loaded:
180
+ raise ValueError("No adapter loaded. Please load an adapter first.")
181
+
182
+ from peft.tuners.tuners_utils import BaseTunerLayer
183
+
184
+ for _, module in self.named_modules():
185
+ if isinstance(module, BaseTunerLayer):
186
+ return module.active_adapter
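Continuing the `add_adapter` sketch above, toggling the attached adapter off and back on looks roughly like this:

```python
unet.disable_adapters()  # inference now uses only the base weights
unet.enable_adapters()   # re-activates every adapter previously attached
```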
diffusers/loaders/single_file.py ADDED
@@ -0,0 +1,626 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from contextlib import nullcontext
15
+ from io import BytesIO
16
+ from pathlib import Path
17
+
18
+ import requests
19
+ import torch
20
+ import yaml
21
+ from huggingface_hub import hf_hub_download
22
+ from huggingface_hub.utils import validate_hf_hub_args
23
+
24
+ from ..utils import deprecate, is_accelerate_available, is_transformers_available, logging
25
+
26
+
27
+ if is_transformers_available():
28
+ pass
29
+
30
+ if is_accelerate_available():
31
+ from accelerate import init_empty_weights
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
+ class FromSingleFileMixin:
37
+ """
38
+ Load model weights saved in the `.ckpt` or `.safetensors` format into a [`DiffusionPipeline`].
39
+ """
40
+
41
+ @classmethod
42
+ def from_ckpt(cls, *args, **kwargs):
43
+ deprecation_message = "The function `from_ckpt` is deprecated in favor of `from_single_file` and will be removed in diffusers v.0.21. Please make sure to use `StableDiffusionPipeline.from_single_file(...)` instead."
44
+ deprecate("from_ckpt", "0.21.0", deprecation_message, standard_warn=False)
45
+ return cls.from_single_file(*args, **kwargs)
46
+
47
+ @classmethod
48
+ @validate_hf_hub_args
49
+ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
50
+ r"""
51
+ Instantiate a [`DiffusionPipeline`] from pretrained pipeline weights saved in the `.ckpt` or `.safetensors`
52
+ format. The pipeline is set in evaluation mode (`model.eval()`) by default.
53
+
54
+ Parameters:
55
+ pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
56
+ Can be either:
57
+ - A link to the `.ckpt` file (for example
58
+ `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
59
+ - A path to a *file* containing all pipeline weights.
60
+ torch_dtype (`str` or `torch.dtype`, *optional*):
61
+ Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
62
+ dtype is automatically derived from the model's weights.
63
+ force_download (`bool`, *optional*, defaults to `False`):
64
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
65
+ cached versions if they exist.
66
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
67
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
68
+ is not used.
69
+ resume_download (`bool`, *optional*, defaults to `False`):
70
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
71
+ incompletely downloaded files are deleted.
72
+ proxies (`Dict[str, str]`, *optional*):
73
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
74
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
75
+ local_files_only (`bool`, *optional*, defaults to `False`):
76
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
77
+ won't be downloaded from the Hub.
78
+ token (`str` or *bool*, *optional*):
79
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
80
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
81
+ revision (`str`, *optional*, defaults to `"main"`):
82
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
83
+ allowed by Git.
84
+ use_safetensors (`bool`, *optional*, defaults to `None`):
85
+ If set to `None`, the safetensors weights are downloaded if they're available **and** if the
86
+ safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
87
+ weights. If set to `False`, safetensors weights are not loaded.
88
+ extract_ema (`bool`, *optional*, defaults to `False`):
89
+ Whether to extract the EMA weights or not. Pass `True` to extract the EMA weights which usually yield
90
+ higher quality images for inference. Non-EMA weights are usually better for continuing finetuning.
91
+ upcast_attention (`bool`, *optional*, defaults to `None`):
92
+ Whether the attention computation should always be upcasted.
93
+ image_size (`int`, *optional*, defaults to 512):
94
+ The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
95
+ Diffusion v2 base model. Use 768 for Stable Diffusion v2.
96
+ prediction_type (`str`, *optional*):
97
+ The prediction type the model was trained on. Use `'epsilon'` for all Stable Diffusion v1 models and
98
+ the Stable Diffusion v2 base model. Use `'v_prediction'` for Stable Diffusion v2.
99
+ num_in_channels (`int`, *optional*, defaults to `None`):
100
+ The number of input channels. If `None`, it is automatically inferred.
101
+ scheduler_type (`str`, *optional*, defaults to `"pndm"`):
102
+ Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm",
103
+ "ddim"]`.
104
+ load_safety_checker (`bool`, *optional*, defaults to `True`):
105
+ Whether to load the safety checker or not.
106
+ text_encoder ([`~transformers.CLIPTextModel`], *optional*, defaults to `None`):
107
+ An instance of `CLIPTextModel` to use, specifically the
108
+ [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. If this
109
+ parameter is `None`, the function loads a new instance of `CLIPTextModel` by itself if needed.
110
+ vae (`AutoencoderKL`, *optional*, defaults to `None`):
111
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. If
112
+ this parameter is `None`, the function loads a new instance of [`AutoencoderKL`] by itself, if needed.
113
+ tokenizer ([`~transformers.CLIPTokenizer`], *optional*, defaults to `None`):
114
+ An instance of `CLIPTokenizer` to use. If this parameter is `None`, the function loads a new instance
115
+ of `CLIPTokenizer` by itself if needed.
116
+ original_config_file (`str`):
117
+ Path to `.yaml` config file corresponding to the original architecture. If `None`, will be
118
+ automatically inferred by looking for a key that only exists in SD2.0 models.
119
+ kwargs (remaining dictionary of keyword arguments, *optional*):
120
+ Can be used to overwrite load and saveable variables (for example the pipeline components of the
121
+ specific pipeline class). The overwritten components are directly passed to the pipelines `__init__`
122
+ method. See example below for more information.
123
+
124
+ Examples:
125
+
126
+ ```py
127
+ >>> from diffusers import StableDiffusionPipeline
128
+
129
+ >>> # Download pipeline from huggingface.co and cache.
130
+ >>> pipeline = StableDiffusionPipeline.from_single_file(
131
+ ... "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors"
132
+ ... )
133
+
134
+ >>> # Load pipeline from a local file
135
+ >>> # file is downloaded under ./v1-5-pruned-emaonly.ckpt
136
+ >>> pipeline = StableDiffusionPipeline.from_single_file("./v1-5-pruned-emaonly.ckpt")
137
+
138
+ >>> # Enable float16 and move to GPU
139
+ >>> pipeline = StableDiffusionPipeline.from_single_file(
140
+ ... "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt",
141
+ ... torch_dtype=torch.float16,
142
+ ... )
143
+ >>> pipeline.to("cuda")
144
+ ```
145
+ """
146
+ # import here to avoid circular dependency
147
+ from ..pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
148
+
149
+ original_config_file = kwargs.pop("original_config_file", None)
150
+ config_files = kwargs.pop("config_files", None)
151
+ cache_dir = kwargs.pop("cache_dir", None)
152
+ resume_download = kwargs.pop("resume_download", False)
153
+ force_download = kwargs.pop("force_download", False)
154
+ proxies = kwargs.pop("proxies", None)
155
+ local_files_only = kwargs.pop("local_files_only", None)
156
+ token = kwargs.pop("token", None)
157
+ revision = kwargs.pop("revision", None)
158
+ extract_ema = kwargs.pop("extract_ema", False)
159
+ image_size = kwargs.pop("image_size", None)
160
+ scheduler_type = kwargs.pop("scheduler_type", "pndm")
161
+ num_in_channels = kwargs.pop("num_in_channels", None)
162
+ upcast_attention = kwargs.pop("upcast_attention", None)
163
+ load_safety_checker = kwargs.pop("load_safety_checker", True)
164
+ prediction_type = kwargs.pop("prediction_type", None)
165
+ text_encoder = kwargs.pop("text_encoder", None)
166
+ text_encoder_2 = kwargs.pop("text_encoder_2", None)
167
+ vae = kwargs.pop("vae", None)
168
+ controlnet = kwargs.pop("controlnet", None)
169
+ adapter = kwargs.pop("adapter", None)
170
+ tokenizer = kwargs.pop("tokenizer", None)
171
+ tokenizer_2 = kwargs.pop("tokenizer_2", None)
172
+
173
+ torch_dtype = kwargs.pop("torch_dtype", None)
174
+
175
+ use_safetensors = kwargs.pop("use_safetensors", None)
176
+
177
+ pipeline_name = cls.__name__
178
+ file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
179
+ from_safetensors = file_extension == "safetensors"
180
+
181
+ if from_safetensors and use_safetensors is False:
182
+ raise ValueError("The checkpoint is a `.safetensors` file, but `use_safetensors=False` was passed. Set `use_safetensors=True` or provide a non-safetensors checkpoint.")
183
+
184
+ # TODO: For now we only support stable diffusion
185
+ stable_unclip = None
186
+ model_type = None
187
+
188
+ if pipeline_name in [
189
+ "StableDiffusionControlNetPipeline",
190
+ "StableDiffusionControlNetImg2ImgPipeline",
191
+ "StableDiffusionControlNetInpaintPipeline",
192
+ ]:
193
+ from ..models.controlnet import ControlNetModel
194
+ from ..pipelines.controlnet.multicontrolnet import MultiControlNetModel
195
+
196
+ # list/tuple or a single instance of ControlNetModel or MultiControlNetModel
197
+ if not (
198
+ isinstance(controlnet, (ControlNetModel, MultiControlNetModel))
199
+ or isinstance(controlnet, (list, tuple))
200
+ and isinstance(controlnet[0], ControlNetModel)
201
+ ):
202
+ raise ValueError("ControlNet needs to be passed if loading from ControlNet pipeline.")
203
+ elif "StableDiffusion" in pipeline_name:
204
+ # Model type will be inferred from the checkpoint.
205
+ pass
206
+ elif pipeline_name == "StableUnCLIPPipeline":
207
+ model_type = "FrozenOpenCLIPEmbedder"
208
+ stable_unclip = "txt2img"
209
+ elif pipeline_name == "StableUnCLIPImg2ImgPipeline":
210
+ model_type = "FrozenOpenCLIPEmbedder"
211
+ stable_unclip = "img2img"
212
+ elif pipeline_name == "PaintByExamplePipeline":
213
+ model_type = "PaintByExample"
214
+ elif pipeline_name == "LDMTextToImagePipeline":
215
+ model_type = "LDMTextToImage"
216
+ else:
217
+ raise ValueError(f"Unhandled pipeline class: {pipeline_name}")
218
+
219
+ # remove huggingface url
220
+ has_valid_url_prefix = False
221
+ valid_url_prefixes = ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"]
222
+ for prefix in valid_url_prefixes:
223
+ if pretrained_model_link_or_path.startswith(prefix):
224
+ pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]
225
+ has_valid_url_prefix = True
226
+
227
+ # Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
228
+ ckpt_path = Path(pretrained_model_link_or_path)
229
+ if not ckpt_path.is_file():
230
+ if not has_valid_url_prefix:
231
+ raise ValueError(
232
+ f"The provided path is neither an existing local file nor a valid Hugging Face URL. Valid URLs begin with {', '.join(valid_url_prefixes)}"
233
+ )
234
+
235
+ # get repo_id and (potentially nested) file path of ckpt in repo
236
+ repo_id = "/".join(ckpt_path.parts[:2])
237
+ file_path = "/".join(ckpt_path.parts[2:])
238
+
239
+ if file_path.startswith("blob/"):
240
+ file_path = file_path[len("blob/") :]
241
+
242
+ if file_path.startswith("main/"):
243
+ file_path = file_path[len("main/") :]
244
+
245
+ pretrained_model_link_or_path = hf_hub_download(
246
+ repo_id,
247
+ filename=file_path,
248
+ cache_dir=cache_dir,
249
+ resume_download=resume_download,
250
+ proxies=proxies,
251
+ local_files_only=local_files_only,
252
+ token=token,
253
+ revision=revision,
254
+ force_download=force_download,
255
+ )
256
+
257
+ pipe = download_from_original_stable_diffusion_ckpt(
258
+ pretrained_model_link_or_path,
259
+ pipeline_class=cls,
260
+ model_type=model_type,
261
+ stable_unclip=stable_unclip,
262
+ controlnet=controlnet,
263
+ adapter=adapter,
264
+ from_safetensors=from_safetensors,
265
+ extract_ema=extract_ema,
266
+ image_size=image_size,
267
+ scheduler_type=scheduler_type,
268
+ num_in_channels=num_in_channels,
269
+ upcast_attention=upcast_attention,
270
+ load_safety_checker=load_safety_checker,
271
+ prediction_type=prediction_type,
272
+ text_encoder=text_encoder,
273
+ text_encoder_2=text_encoder_2,
274
+ vae=vae,
275
+ tokenizer=tokenizer,
276
+ tokenizer_2=tokenizer_2,
277
+ original_config_file=original_config_file,
278
+ config_files=config_files,
279
+ local_files_only=local_files_only,
280
+ )
281
+
282
+ if torch_dtype is not None:
283
+ pipe.to(dtype=torch_dtype)
284
+
285
+ return pipe
286
+
287
+
288
+ class FromOriginalVAEMixin:
289
+ """
290
+ Load pretrained AutoencoderKL (VAE) weights saved in the `.ckpt` or `.safetensors` format into an [`AutoencoderKL`].
291
+ """
292
+
293
+ @classmethod
294
+ @validate_hf_hub_args
295
+ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
296
+ r"""
297
+ Instantiate an [`AutoencoderKL`] from pretrained VAE weights saved in the original `.ckpt` or
298
+ `.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default.
299
+
300
+ Parameters:
301
+ pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
302
+ Can be either:
303
+ - A link to the `.ckpt` file (for example
304
+ `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
305
+ - A path to a *file* containing all pipeline weights.
306
+ torch_dtype (`str` or `torch.dtype`, *optional*):
307
+ Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
308
+ dtype is automatically derived from the model's weights.
309
+ force_download (`bool`, *optional*, defaults to `False`):
310
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
311
+ cached versions if they exist.
312
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
313
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
314
+ is not used.
315
+ resume_download (`bool`, *optional*, defaults to `False`):
316
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
317
+ incompletely downloaded files are deleted.
318
+ proxies (`Dict[str, str]`, *optional*):
319
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
320
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
321
+ local_files_only (`bool`, *optional*, defaults to `False`):
322
+ Whether to only load local model weights and configuration files or not. If set to True, the model
323
+ won't be downloaded from the Hub.
324
+ token (`str` or *bool*, *optional*):
325
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
326
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
327
+ revision (`str`, *optional*, defaults to `"main"`):
328
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
329
+ allowed by Git.
330
+ image_size (`int`, *optional*, defaults to 512):
331
+ The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
332
+ Diffusion v2 base model. Use 768 for Stable Diffusion v2.
333
+ use_safetensors (`bool`, *optional*, defaults to `None`):
334
+ If set to `None`, the safetensors weights are downloaded if they're available **and** if the
335
+ safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
336
+ weights. If set to `False`, safetensors weights are not loaded.
337
+ upcast_attention (`bool`, *optional*, defaults to `None`):
338
+ Whether the attention computation should always be upcasted.
339
+ scaling_factor (`float`, *optional*, defaults to 0.18215):
340
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
341
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
342
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
343
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z
344
+ = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution
345
+ Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
346
+ kwargs (remaining dictionary of keyword arguments, *optional*):
347
+ Can be used to overwrite load and saveable variables (for example the pipeline components of the
348
+ specific pipeline class). The overwritten components are directly passed to the pipelines `__init__`
349
+ method. See example below for more information.
350
+
351
+ <Tip warning={true}>
352
+
353
+ Make sure to pass both `image_size` and `scaling_factor` to `from_single_file()` if you're loading
354
+ a VAE from SDXL or a Stable Diffusion v2 model or higher.
355
+
356
+ </Tip>
357
+
358
+ Examples:
359
+
360
+ ```py
361
+ from diffusers import AutoencoderKL
362
+
363
+ url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors" # can also be local file
364
+ model = AutoencoderKL.from_single_file(url)
365
+ ```
366
+ """
367
+ from ..models import AutoencoderKL
368
+
369
+ # import here to avoid circular dependency
370
+ from ..pipelines.stable_diffusion.convert_from_ckpt import (
371
+ convert_ldm_vae_checkpoint,
372
+ create_vae_diffusers_config,
373
+ )
374
+
375
+ config_file = kwargs.pop("config_file", None)
376
+ cache_dir = kwargs.pop("cache_dir", None)
377
+ resume_download = kwargs.pop("resume_download", False)
378
+ force_download = kwargs.pop("force_download", False)
379
+ proxies = kwargs.pop("proxies", None)
380
+ local_files_only = kwargs.pop("local_files_only", None)
381
+ token = kwargs.pop("token", None)
382
+ revision = kwargs.pop("revision", None)
383
+ image_size = kwargs.pop("image_size", None)
384
+ scaling_factor = kwargs.pop("scaling_factor", None)
385
+ kwargs.pop("upcast_attention", None)
386
+
387
+ torch_dtype = kwargs.pop("torch_dtype", None)
388
+
389
+ use_safetensors = kwargs.pop("use_safetensors", None)
390
+
391
+ file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
392
+ from_safetensors = file_extension == "safetensors"
393
+
394
+ if from_safetensors and use_safetensors is False:
395
+ raise ValueError("The checkpoint is a `.safetensors` file, but `use_safetensors=False` was passed. Set `use_safetensors=True` or provide a non-safetensors checkpoint.")
396
+
397
+ # remove huggingface url
398
+ for prefix in ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"]:
399
+ if pretrained_model_link_or_path.startswith(prefix):
400
+ pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]
401
+
402
+ # Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
403
+ ckpt_path = Path(pretrained_model_link_or_path)
404
+ if not ckpt_path.is_file():
405
+ # get repo_id and (potentially nested) file path of ckpt in repo
406
+ repo_id = "/".join(ckpt_path.parts[:2])
407
+ file_path = "/".join(ckpt_path.parts[2:])
408
+
409
+ if file_path.startswith("blob/"):
410
+ file_path = file_path[len("blob/") :]
411
+
412
+ if file_path.startswith("main/"):
413
+ file_path = file_path[len("main/") :]
414
+
415
+ pretrained_model_link_or_path = hf_hub_download(
416
+ repo_id,
417
+ filename=file_path,
418
+ cache_dir=cache_dir,
419
+ resume_download=resume_download,
420
+ proxies=proxies,
421
+ local_files_only=local_files_only,
422
+ token=token,
423
+ revision=revision,
424
+ force_download=force_download,
425
+ )
426
+
427
+ if from_safetensors:
428
+ from safetensors import safe_open
429
+
430
+ checkpoint = {}
431
+ with safe_open(pretrained_model_link_or_path, framework="pt", device="cpu") as f:
432
+ for key in f.keys():
433
+ checkpoint[key] = f.get_tensor(key)
434
+ else:
435
+ checkpoint = torch.load(pretrained_model_link_or_path, map_location="cpu")
436
+
437
+ if "state_dict" in checkpoint:
438
+ checkpoint = checkpoint["state_dict"]
439
+
440
+ if config_file is None:
441
+ config_url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
442
+ config_file = BytesIO(requests.get(config_url).content)
443
+
444
+ original_config = yaml.safe_load(config_file)
445
+
446
+ # default to sd-v1-5
447
+ image_size = image_size or 512
448
+
449
+ vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
450
+ converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config)
451
+
452
+ if scaling_factor is not None:
453
+ vae_config["scaling_factor"] = scaling_factor
454
+ elif (
455
+ "model" in original_config
456
+ and "params" in original_config["model"]
457
+ and "scale_factor" in original_config["model"]["params"]
458
+ ):
459
+ vae_config["scaling_factor"] = original_config["model"]["params"]["scale_factor"]
460
+ else:
461
+ vae_config["scaling_factor"] = 0.18215  # default SD scaling factor
462
+
463
+
464
+ ctx = init_empty_weights if is_accelerate_available() else nullcontext
465
+ with ctx():
466
+ vae = AutoencoderKL(**vae_config)
467
+
468
+ if is_accelerate_available():
469
+ from ..models.modeling_utils import load_model_dict_into_meta
470
+
471
+ load_model_dict_into_meta(vae, converted_vae_checkpoint, device="cpu")
472
+ else:
473
+ vae.load_state_dict(converted_vae_checkpoint)
474
+
475
+ if torch_dtype is not None:
476
+ vae.to(dtype=torch_dtype)
477
+
478
+ return vae
479
+
480
+
481
+ class FromOriginalControlnetMixin:
482
+ """
483
+ Load pretrained ControlNet weights saved in the `.ckpt` or `.safetensors` format into a [`ControlNetModel`].
484
+ """
485
+
486
+ @classmethod
487
+ @validate_hf_hub_args
488
+ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
489
+ r"""
490
+ Instantiate a [`ControlNetModel`] from pretrained ControlNet weights saved in the original `.ckpt` or
491
+ `.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default.
492
+
493
+ Parameters:
494
+ pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
495
+ Can be either:
496
+ - A link to the `.ckpt` file (for example
497
+ `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
498
+ - A path to a *file* containing all pipeline weights.
499
+ torch_dtype (`str` or `torch.dtype`, *optional*):
500
+ Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
501
+ dtype is automatically derived from the model's weights.
502
+ force_download (`bool`, *optional*, defaults to `False`):
503
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
504
+ cached versions if they exist.
505
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
506
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
507
+ is not used.
508
+ resume_download (`bool`, *optional*, defaults to `False`):
509
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
510
+ incompletely downloaded files are deleted.
511
+ proxies (`Dict[str, str]`, *optional*):
512
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
513
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
514
+ local_files_only (`bool`, *optional*, defaults to `False`):
515
+ Whether to only load local model weights and configuration files or not. If set to True, the model
516
+ won't be downloaded from the Hub.
517
+ token (`str` or *bool*, *optional*):
518
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
519
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
520
+ revision (`str`, *optional*, defaults to `"main"`):
521
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
522
+ allowed by Git.
523
+ use_safetensors (`bool`, *optional*, defaults to `None`):
524
+ If set to `None`, the safetensors weights are downloaded if they're available **and** if the
525
+ safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
526
+ weights. If set to `False`, safetensors weights are not loaded.
527
+ image_size (`int`, *optional*, defaults to 512):
528
+ The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
529
+ Diffusion v2 base model. Use 768 for Stable Diffusion v2.
530
+ upcast_attention (`bool`, *optional*, defaults to `None`):
531
+ Whether the attention computation should always be upcasted.
532
+ kwargs (remaining dictionary of keyword arguments, *optional*):
533
+ Can be used to overwrite load and saveable variables (for example the pipeline components of the
534
+ specific pipeline class). The overwritten components are directly passed to the pipelines `__init__`
535
+ method. See example below for more information.
536
+
537
+ Examples:
538
+
539
+ ```py
540
+ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
541
+
542
+ url = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" # can also be a local path
543
+ controlnet = ControlNetModel.from_single_file(url)
544
+
545
+ url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned.safetensors" # can also be a local path
546
+ pipe = StableDiffusionControlNetPipeline.from_single_file(url, controlnet=controlnet)
547
+ ```
548
+ """
549
+ # import here to avoid circular dependency
550
+ from ..pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
551
+
552
+ config_file = kwargs.pop("config_file", None)
553
+ cache_dir = kwargs.pop("cache_dir", None)
554
+ resume_download = kwargs.pop("resume_download", False)
555
+ force_download = kwargs.pop("force_download", False)
556
+ proxies = kwargs.pop("proxies", None)
557
+ local_files_only = kwargs.pop("local_files_only", None)
558
+ token = kwargs.pop("token", None)
559
+ num_in_channels = kwargs.pop("num_in_channels", None)
560
+ use_linear_projection = kwargs.pop("use_linear_projection", None)
561
+ revision = kwargs.pop("revision", None)
562
+ extract_ema = kwargs.pop("extract_ema", False)
563
+ image_size = kwargs.pop("image_size", None)
564
+ upcast_attention = kwargs.pop("upcast_attention", None)
565
+
566
+ torch_dtype = kwargs.pop("torch_dtype", None)
567
+
568
+ use_safetensors = kwargs.pop("use_safetensors", None)
569
+
570
+ file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
571
+ from_safetensors = file_extension == "safetensors"
572
+
573
+ if from_safetensors and use_safetensors is False:
574
+ raise ValueError("The checkpoint is a `.safetensors` file, but `use_safetensors=False` was passed. Set `use_safetensors=True` or provide a non-safetensors checkpoint.")
575
+
576
+ # remove huggingface url
577
+ for prefix in ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"]:
578
+ if pretrained_model_link_or_path.startswith(prefix):
579
+ pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]
580
+
581
+ # Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
582
+ ckpt_path = Path(pretrained_model_link_or_path)
583
+ if not ckpt_path.is_file():
584
+ # get repo_id and (potentially nested) file path of ckpt in repo
585
+ repo_id = "/".join(ckpt_path.parts[:2])
586
+ file_path = "/".join(ckpt_path.parts[2:])
587
+
588
+ if file_path.startswith("blob/"):
589
+ file_path = file_path[len("blob/") :]
590
+
591
+ if file_path.startswith("main/"):
592
+ file_path = file_path[len("main/") :]
593
+
594
+ pretrained_model_link_or_path = hf_hub_download(
595
+ repo_id,
596
+ filename=file_path,
597
+ cache_dir=cache_dir,
598
+ resume_download=resume_download,
599
+ proxies=proxies,
600
+ local_files_only=local_files_only,
601
+ token=token,
602
+ revision=revision,
603
+ force_download=force_download,
604
+ )
605
+
606
+ if config_file is None:
607
+ config_url = "https://raw.githubusercontent.com/lllyasviel/ControlNet/main/models/cldm_v15.yaml"
608
+ config_file = BytesIO(requests.get(config_url).content)
609
+
610
+ image_size = image_size or 512
611
+
612
+ controlnet = download_controlnet_from_original_ckpt(
613
+ pretrained_model_link_or_path,
614
+ original_config_file=config_file,
615
+ image_size=image_size,
616
+ extract_ema=extract_ema,
617
+ num_in_channels=num_in_channels,
618
+ upcast_attention=upcast_attention,
619
+ from_safetensors=from_safetensors,
620
+ use_linear_projection=use_linear_projection,
621
+ )
622
+
623
+ if torch_dtype is not None:
624
+ controlnet.to(dtype=torch_dtype)
625
+
626
+ return controlnet
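Editor's note: the VAE loader above warns that `image_size` and `scaling_factor` cannot be inferred from a raw checkpoint and should be passed explicitly for SDXL or SD v2 VAEs. A minimal sketch of that case; the URL is illustrative, and 0.13025 is the scaling factor commonly used for the SDXL VAE:

```py
import torch
from diffusers import AutoencoderKL

# illustrative single-file VAE checkpoint; can also be a local .safetensors path
url = "https://huggingface.co/stabilityai/sdxl-vae/blob/main/sdxl_vae.safetensors"

vae = AutoencoderKL.from_single_file(
    url,
    image_size=1024,         # resolution the SDXL VAE was trained at
    scaling_factor=0.13025,  # SDXL latent scaling factor (0.18215 is the SD v1 default)
    torch_dtype=torch.float16,
)
```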
diffusers/loaders/textual_inversion.py ADDED
@@ -0,0 +1,455 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Dict, List, Optional, Union
15
+
16
+ import safetensors
17
+ import torch
18
+ from huggingface_hub.utils import validate_hf_hub_args
19
+ from torch import nn
20
+
21
+ from ..utils import _get_model_file, is_accelerate_available, is_transformers_available, logging
22
+
23
+
24
+ if is_transformers_available():
25
+ from transformers import PreTrainedModel, PreTrainedTokenizer
26
+
27
+ if is_accelerate_available():
28
+ from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+ TEXT_INVERSION_NAME = "learned_embeds.bin"
33
+ TEXT_INVERSION_NAME_SAFE = "learned_embeds.safetensors"
34
+
35
+
36
+ @validate_hf_hub_args
37
+ def load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs):
38
+ cache_dir = kwargs.pop("cache_dir", None)
39
+ force_download = kwargs.pop("force_download", False)
40
+ resume_download = kwargs.pop("resume_download", False)
41
+ proxies = kwargs.pop("proxies", None)
42
+ local_files_only = kwargs.pop("local_files_only", None)
43
+ token = kwargs.pop("token", None)
44
+ revision = kwargs.pop("revision", None)
45
+ subfolder = kwargs.pop("subfolder", None)
46
+ weight_name = kwargs.pop("weight_name", None)
47
+ use_safetensors = kwargs.pop("use_safetensors", None)
48
+
49
+ allow_pickle = False
50
+ if use_safetensors is None:
51
+ use_safetensors = True
52
+ allow_pickle = True
53
+
54
+ user_agent = {
55
+ "file_type": "text_inversion",
56
+ "framework": "pytorch",
57
+ }
58
+ state_dicts = []
59
+ for pretrained_model_name_or_path in pretrained_model_name_or_paths:
60
+ if not isinstance(pretrained_model_name_or_path, (dict, torch.Tensor)):
61
+ # 3.1. Load textual inversion file
62
+ model_file = None
63
+
64
+ # Let's first try to load .safetensors weights
65
+ if (use_safetensors and weight_name is None) or (
66
+ weight_name is not None and weight_name.endswith(".safetensors")
67
+ ):
68
+ try:
69
+ model_file = _get_model_file(
70
+ pretrained_model_name_or_path,
71
+ weights_name=weight_name or TEXT_INVERSION_NAME_SAFE,
72
+ cache_dir=cache_dir,
73
+ force_download=force_download,
74
+ resume_download=resume_download,
75
+ proxies=proxies,
76
+ local_files_only=local_files_only,
77
+ token=token,
78
+ revision=revision,
79
+ subfolder=subfolder,
80
+ user_agent=user_agent,
81
+ )
82
+ state_dict = safetensors.torch.load_file(model_file, device="cpu")
83
+ except Exception as e:
84
+ if not allow_pickle:
85
+ raise e
86
+
87
+ model_file = None
88
+
89
+ if model_file is None:
90
+ model_file = _get_model_file(
91
+ pretrained_model_name_or_path,
92
+ weights_name=weight_name or TEXT_INVERSION_NAME,
93
+ cache_dir=cache_dir,
94
+ force_download=force_download,
95
+ resume_download=resume_download,
96
+ proxies=proxies,
97
+ local_files_only=local_files_only,
98
+ token=token,
99
+ revision=revision,
100
+ subfolder=subfolder,
101
+ user_agent=user_agent,
102
+ )
103
+ state_dict = torch.load(model_file, map_location="cpu")
104
+ else:
105
+ state_dict = pretrained_model_name_or_path
106
+
107
+ state_dicts.append(state_dict)
108
+
109
+ return state_dicts
110
+
111
+
112
+ class TextualInversionLoaderMixin:
113
+ r"""
114
+ Load Textual Inversion tokens and embeddings to the tokenizer and text encoder.
115
+ """
116
+
117
+ def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"): # noqa: F821
118
+ r"""
119
+ Processes prompts that include a special token corresponding to a multi-vector textual inversion embedding to
120
+ be replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
121
+ inversion token or if the textual inversion token is a single vector, the input prompt is returned.
122
+
123
+ Parameters:
124
+ prompt (`str` or list of `str`):
125
+ The prompt or prompts to guide the image generation.
126
+ tokenizer (`PreTrainedTokenizer`):
127
+ The tokenizer responsible for encoding the prompt into input tokens.
128
+
129
+ Returns:
130
+ `str` or list of `str`: The converted prompt
131
+ """
132
+ if not isinstance(prompt, List):
133
+ prompts = [prompt]
134
+ else:
135
+ prompts = prompt
136
+
137
+ prompts = [self._maybe_convert_prompt(p, tokenizer) for p in prompts]
138
+
139
+ if not isinstance(prompt, List):
140
+ return prompts[0]
141
+
142
+ return prompts
143
+
144
+ def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"): # noqa: F821
145
+ r"""
146
+ Maybe convert a prompt into a "multi vector"-compatible prompt. If the prompt includes a token that corresponds
147
+ to a multi-vector textual inversion embedding, this function will process the prompt so that the special token
148
+ is replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
149
+ inversion token or a textual inversion token that is a single vector, the input prompt is simply returned.
150
+
151
+ Parameters:
152
+ prompt (`str`):
153
+ The prompt to guide the image generation.
154
+ tokenizer (`PreTrainedTokenizer`):
155
+ The tokenizer responsible for encoding the prompt into input tokens.
156
+
157
+ Returns:
158
+ `str`: The converted prompt
159
+ """
160
+ tokens = tokenizer.tokenize(prompt)
161
+ unique_tokens = set(tokens)
162
+ for token in unique_tokens:
163
+ if token in tokenizer.added_tokens_encoder:
164
+ replacement = token
165
+ i = 1
166
+ while f"{token}_{i}" in tokenizer.added_tokens_encoder:
167
+ replacement += f" {token}_{i}"
168
+ i += 1
169
+
170
+ prompt = prompt.replace(token, replacement)
171
+
172
+ return prompt
173
+
174
+ def _check_text_inv_inputs(self, tokenizer, text_encoder, pretrained_model_name_or_paths, tokens):
175
+ if tokenizer is None:
176
+ raise ValueError(
177
+ f"{self.__class__.__name__} requires `self.tokenizer` or passing a `tokenizer` of type `PreTrainedTokenizer` for calling"
178
+ f" `{self.load_textual_inversion.__name__}`"
179
+ )
180
+
181
+ if text_encoder is None:
182
+ raise ValueError(
183
+ f"{self.__class__.__name__} requires `self.text_encoder` or passing a `text_encoder` of type `PreTrainedModel` for calling"
184
+ f" `{self.load_textual_inversion.__name__}`"
185
+ )
186
+
187
+ if len(pretrained_model_name_or_paths) > 1 and len(pretrained_model_name_or_paths) != len(tokens):
188
+ raise ValueError(
189
+ f"You have passed a list of models of length {len(pretrained_model_name_or_paths)} and a list of tokens of length {len(tokens)}. "
190
+ f"Make sure both lists have the same length."
191
+ )
192
+
193
+ valid_tokens = [t for t in tokens if t is not None]
194
+ if len(set(valid_tokens)) < len(valid_tokens):
195
+ raise ValueError(f"You have passed a list of tokens that contains duplicates: {tokens}")
196
+
197
+ @staticmethod
198
+ def _retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer):
199
+ all_tokens = []
200
+ all_embeddings = []
201
+ for state_dict, token in zip(state_dicts, tokens):
202
+ if isinstance(state_dict, torch.Tensor):
203
+ if token is None:
204
+ raise ValueError(
205
+ "You are trying to load a textual inversion embedding that has been saved as a PyTorch tensor. Make sure to pass the name of the corresponding token in this case: `token=...`."
206
+ )
207
+ loaded_token = token
208
+ embedding = state_dict
209
+ elif len(state_dict) == 1:
210
+ # diffusers
211
+ loaded_token, embedding = next(iter(state_dict.items()))
212
+ elif "string_to_param" in state_dict:
213
+ # A1111
214
+ loaded_token = state_dict["name"]
215
+ embedding = state_dict["string_to_param"]["*"]
216
+ else:
217
+ raise ValueError(
218
+ f"Loaded state dictionary is incorrect: {state_dict}. \n\n"
219
+ "Please verify that the loaded state dictionary of the textual embedding either only has a single key or includes the `string_to_param`"
220
+ " input key."
221
+ )
222
+
223
+ if token is not None and loaded_token != token:
224
+ logger.info(f"The loaded token: {loaded_token} is overwritten by the passed token {token}.")
225
+ else:
226
+ token = loaded_token
227
+
228
+ if token in tokenizer.get_vocab():
229
+ raise ValueError(
230
+ f"Token {token} already in tokenizer vocabulary. Please choose a different token name or remove {token} and embedding from the tokenizer and text encoder."
231
+ )
232
+
233
+ all_tokens.append(token)
234
+ all_embeddings.append(embedding)
235
+
236
+ return all_tokens, all_embeddings
237
+
238
+ @staticmethod
239
+ def _extend_tokens_and_embeddings(tokens, embeddings, tokenizer):
240
+ all_tokens = []
241
+ all_embeddings = []
242
+
243
+ for embedding, token in zip(embeddings, tokens):
244
+ if f"{token}_1" in tokenizer.get_vocab():
245
+ multi_vector_tokens = [token]
246
+ i = 1
247
+ while f"{token}_{i}" in tokenizer.added_tokens_encoder:
248
+ multi_vector_tokens.append(f"{token}_{i}")
249
+ i += 1
250
+
251
+ raise ValueError(
252
+ f"Multi-vector Token {multi_vector_tokens} already in tokenizer vocabulary. Please choose a different token name or remove the {multi_vector_tokens} and embedding from the tokenizer and text encoder."
253
+ )
254
+
255
+ is_multi_vector = len(embedding.shape) > 1 and embedding.shape[0] > 1
256
+ if is_multi_vector:
257
+ all_tokens += [token] + [f"{token}_{i}" for i in range(1, embedding.shape[0])]
258
+ all_embeddings += [e for e in embedding] # noqa: C416
259
+ else:
260
+ all_tokens += [token]
261
+ all_embeddings += [embedding[0]] if len(embedding.shape) > 1 else [embedding]
262
+
263
+ return all_tokens, all_embeddings
264
+
265
+ @validate_hf_hub_args
266
+ def load_textual_inversion(
267
+ self,
268
+ pretrained_model_name_or_path: Union[str, List[str], Dict[str, torch.Tensor], List[Dict[str, torch.Tensor]]],
269
+ token: Optional[Union[str, List[str]]] = None,
270
+ tokenizer: Optional["PreTrainedTokenizer"] = None, # noqa: F821
271
+ text_encoder: Optional["PreTrainedModel"] = None, # noqa: F821
272
+ **kwargs,
273
+ ):
274
+ r"""
275
+ Load Textual Inversion embeddings into the text encoder of [`StableDiffusionPipeline`] (both 🤗 Diffusers and
276
+ Automatic1111 formats are supported).
277
+
278
+ Parameters:
279
+ pretrained_model_name_or_path (`str` or `os.PathLike` or `List[str or os.PathLike]` or `Dict` or `List[Dict]`):
280
+ Can be either one of the following or a list of them:
281
+
282
+ - A string, the *model id* (for example `sd-concepts-library/low-poly-hd-logos-icons`) of a
283
+ pretrained model hosted on the Hub.
284
+ - A path to a *directory* (for example `./my_text_inversion_directory/`) containing the textual
285
+ inversion weights.
286
+ - A path to a *file* (for example `./my_text_inversions.pt`) containing textual inversion weights.
287
+ - A [torch state
288
+ dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
289
+
290
+ token (`str` or `List[str]`, *optional*):
291
+ Override the token to use for the textual inversion weights. If `pretrained_model_name_or_path` is a
292
+ list, then `token` must also be a list of equal length.
293
+ text_encoder ([`~transformers.CLIPTextModel`], *optional*):
294
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
295
+ If not specified, the function will use `self.text_encoder`.
296
+ tokenizer ([`~transformers.CLIPTokenizer`], *optional*):
297
+ A `CLIPTokenizer` to tokenize text. If not specified, the function will use `self.tokenizer`.
298
+ weight_name (`str`, *optional*):
299
+ Name of a custom weight file. This should be used when:
300
+
301
+ - The saved textual inversion file is in 🤗 Diffusers format, but was saved under a specific weight
302
+ name such as `text_inv.bin`.
303
+ - The saved textual inversion file is in the Automatic1111 format.
304
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
305
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
306
+ is not used.
307
+ force_download (`bool`, *optional*, defaults to `False`):
308
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
309
+ cached versions if they exist.
310
+ resume_download (`bool`, *optional*, defaults to `False`):
311
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
312
+ incompletely downloaded files are deleted.
313
+ proxies (`Dict[str, str]`, *optional*):
314
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
315
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
316
+ local_files_only (`bool`, *optional*, defaults to `False`):
317
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
318
+ won't be downloaded from the Hub.
319
+ token (`str` or *bool*, *optional*):
320
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
321
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
322
+ revision (`str`, *optional*, defaults to `"main"`):
323
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
324
+ allowed by Git.
325
+ subfolder (`str`, *optional*, defaults to `""`):
326
+ The subfolder location of a model file within a larger model repository on the Hub or locally.
327
+ mirror (`str`, *optional*):
328
+ Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
329
+ guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
330
+ information.
331
+
332
+ Example:
333
+
334
+ To load a Textual Inversion embedding vector in 🤗 Diffusers format:
335
+
336
+ ```py
337
+ from diffusers import StableDiffusionPipeline
338
+ import torch
339
+
340
+ model_id = "runwayml/stable-diffusion-v1-5"
341
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
342
+
343
+ pipe.load_textual_inversion("sd-concepts-library/cat-toy")
344
+
345
+ prompt = "A <cat-toy> backpack"
346
+
347
+ image = pipe(prompt, num_inference_steps=50).images[0]
348
+ image.save("cat-backpack.png")
349
+ ```
350
+
351
+ To load a Textual Inversion embedding vector in Automatic1111 format, make sure to download the vector first
352
+ (for example from [civitAI](https://civitai.com/models/3036?modelVersionId=9857)) and then load the vector
353
+ locally:
354
+
355
+ ```py
356
+ from diffusers import StableDiffusionPipeline
357
+ import torch
358
+
359
+ model_id = "runwayml/stable-diffusion-v1-5"
360
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
361
+
362
+ pipe.load_textual_inversion("./charturnerv2.pt", token="charturnerv2")
363
+
364
+ prompt = "charturnerv2, multiple views of the same character in the same outfit, a character turnaround of a woman wearing a black jacket and red shirt, best quality, intricate details."
365
+
366
+ image = pipe(prompt, num_inference_steps=50).images[0]
367
+ image.save("character.png")
368
+ ```
369
+
370
+ """
371
+ # 1. Set correct tokenizer and text encoder
372
+ tokenizer = tokenizer or getattr(self, "tokenizer", None)
373
+ text_encoder = text_encoder or getattr(self, "text_encoder", None)
374
+
375
+ # 2. Normalize inputs
376
+ pretrained_model_name_or_paths = (
377
+ [pretrained_model_name_or_path]
378
+ if not isinstance(pretrained_model_name_or_path, list)
379
+ else pretrained_model_name_or_path
380
+ )
381
+ tokens = [token] if not isinstance(token, list) else token
382
+ if tokens[0] is None:
383
+ tokens = tokens * len(pretrained_model_name_or_paths)
384
+
385
+ # 3. Check inputs
386
+ self._check_text_inv_inputs(tokenizer, text_encoder, pretrained_model_name_or_paths, tokens)
387
+
388
+ # 4. Load state dicts of textual embeddings
389
+ state_dicts = load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs)
390
+
391
+ # 4.1 Handle the special case when state_dict is a tensor that contains n embeddings for n tokens
392
+ if len(tokens) > 1 and len(state_dicts) == 1:
393
+ if isinstance(state_dicts[0], torch.Tensor):
394
+ state_dicts = list(state_dicts[0])
395
+ if len(tokens) != len(state_dicts):
396
+ raise ValueError(
397
+ f"You have passed a state_dict that contains {len(state_dicts)} embeddings and a list of tokens of length {len(tokens)}. "
398
+ f"Make sure both have the same length."
399
+ )
400
+
401
+ # 4.2 Retrieve tokens and embeddings
402
+ tokens, embeddings = self._retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer)
403
+
404
+ # 5. Extend tokens and embeddings for multi vector
405
+ tokens, embeddings = self._extend_tokens_and_embeddings(tokens, embeddings, tokenizer)
406
+
407
+ # 6. Make sure all embeddings have the correct size
408
+ expected_emb_dim = text_encoder.get_input_embeddings().weight.shape[-1]
409
+ if any(expected_emb_dim != emb.shape[-1] for emb in embeddings):
410
+ raise ValueError(
411
+ "Loaded embeddings are of incorrect shape. Expected each textual inversion embedding "
412
+ f"to be of shape {expected_emb_dim}, but found embeddings of shape {[emb.shape[-1] for emb in embeddings]}."
413
+ )
414
+
415
+ # 7. Now we can be sure that loading the embedding matrix works
416
+ # < Unsafe code:
417
+
418
+ # 7.1 Remove all accelerate hooks in case the pipeline was cpu-offloaded before; the hooks are re-applied after the embeddings are loaded
419
+ is_model_cpu_offload = False
420
+ is_sequential_cpu_offload = False
421
+ for _, component in self.components.items():
422
+ if isinstance(component, nn.Module):
423
+ if hasattr(component, "_hf_hook"):
424
+ is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
425
+ is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)
426
+ logger.info(
427
+ "Accelerate hooks detected. Since you have called `load_textual_inversion()`, the previous hooks will be first removed. Then the textual inversion parameters will be loaded and the hooks will be applied again."
428
+ )
429
+ remove_hook_from_module(component, recurse=is_sequential_cpu_offload)
430
+
431
+ # 7.2 save expected device and dtype
432
+ device = text_encoder.device
433
+ dtype = text_encoder.dtype
434
+
435
+ # 7.3 Increase token embedding matrix
436
+ text_encoder.resize_token_embeddings(len(tokenizer) + len(tokens))
437
+ input_embeddings = text_encoder.get_input_embeddings().weight
438
+
439
+ # 7.4 Load token and embedding
440
+ for token, embedding in zip(tokens, embeddings):
441
+ # add tokens and get ids
442
+ tokenizer.add_tokens(token)
443
+ token_id = tokenizer.convert_tokens_to_ids(token)
444
+ input_embeddings.data[token_id] = embedding
445
+ logger.info(f"Loaded textual inversion embedding for {token}.")
446
+
447
+ input_embeddings.to(dtype=dtype, device=device)
448
+
449
+ # 7.5 Offload the model again
450
+ if is_model_cpu_offload:
451
+ self.enable_model_cpu_offload()
452
+ elif is_sequential_cpu_offload:
453
+ self.enable_sequential_cpu_offload()
454
+
455
+ # / Unsafe Code >
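Editor's note: the prompt-handling half of this file is easiest to see with a multi-vector embedding. A sketch, assuming a hypothetical local embedding file whose tensor has shape `(4, 768)` (four vectors for one concept):

```py
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# hypothetical multi-vector embedding file and token
pipe.load_textual_inversion("./my_style.pt", token="<my-style>")

# load_textual_inversion() also registered <my-style>_1 ... <my-style>_3 in the tokenizer;
# maybe_convert_prompt() expands the prompt before encoding:
prompt = pipe.maybe_convert_prompt("a house in <my-style>", pipe.tokenizer)
print(prompt)  # "a house in <my-style> <my-style>_1 <my-style>_2 <my-style>_3"
```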
diffusers/loaders/unet.py ADDED
@@ -0,0 +1,828 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import inspect
15
+ import os
16
+ from collections import defaultdict
17
+ from contextlib import nullcontext
18
+ from functools import partial
19
+ from typing import Callable, Dict, List, Optional, Union
20
+
21
+ import safetensors
22
+ import torch
23
+ import torch.nn.functional as F
24
+ from huggingface_hub.utils import validate_hf_hub_args
25
+ from torch import nn
26
+
27
+ from ..models.embeddings import ImageProjection, IPAdapterFullImageProjection, IPAdapterPlusImageProjection
28
+ from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_model_dict_into_meta
29
+ from ..utils import (
30
+ USE_PEFT_BACKEND,
31
+ _get_model_file,
32
+ delete_adapter_layers,
33
+ is_accelerate_available,
34
+ logging,
35
+ set_adapter_layers,
36
+ set_weights_and_activate_adapters,
37
+ )
38
+ from .utils import AttnProcsLayers
39
+
40
+
41
+ if is_accelerate_available():
42
+ from accelerate import init_empty_weights
43
+ from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+
48
+ TEXT_ENCODER_NAME = "text_encoder"
49
+ UNET_NAME = "unet"
50
+
51
+ LORA_WEIGHT_NAME = "pytorch_lora_weights.bin"
52
+ LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors"
53
+
54
+ CUSTOM_DIFFUSION_WEIGHT_NAME = "pytorch_custom_diffusion_weights.bin"
55
+ CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE = "pytorch_custom_diffusion_weights.safetensors"
56
+
57
+
58
+ class UNet2DConditionLoadersMixin:
59
+ """
60
+ Load LoRA layers into a [`UNet2DConditionModel`].
61
+ """
62
+
63
+ text_encoder_name = TEXT_ENCODER_NAME
64
+ unet_name = UNET_NAME
65
+
66
+ @validate_hf_hub_args
67
+ def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
68
+ r"""
69
+ Load pretrained attention processor layers into [`UNet2DConditionModel`]. Attention processor layers have to be
70
+ defined in
71
+ [`attention_processor.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py)
72
+ and be a `torch.nn.Module` class.
73
+
74
+ Parameters:
75
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
76
+ Can be either:
77
+
78
+ - A string, the model id (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
79
+ the Hub.
80
+ - A path to a directory (for example `./my_model_directory`) containing the model weights saved
81
+ with [`ModelMixin.save_pretrained`].
82
+ - A [torch state
83
+ dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
84
+
85
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
86
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
87
+ is not used.
88
+ force_download (`bool`, *optional*, defaults to `False`):
89
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
90
+ cached versions if they exist.
91
+ resume_download (`bool`, *optional*, defaults to `False`):
92
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
93
+ incompletely downloaded files are deleted.
94
+ proxies (`Dict[str, str]`, *optional*):
95
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
96
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
97
+ local_files_only (`bool`, *optional*, defaults to `False`):
98
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
99
+ won't be downloaded from the Hub.
100
+ token (`str` or *bool*, *optional*):
101
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
102
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
103
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
104
+ Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
105
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
106
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
107
+ argument to `True` will raise an error.
108
+ revision (`str`, *optional*, defaults to `"main"`):
109
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
110
+ allowed by Git.
111
+ subfolder (`str`, *optional*, defaults to `""`):
112
+ The subfolder location of a model file within a larger model repository on the Hub or locally.
113
+ mirror (`str`, *optional*):
114
+ Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not
115
+ guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
116
+ information.
117
+
118
+ Example:
119
+
120
+ ```py
121
+ from diffusers import AutoPipelineForText2Image
122
+ import torch
123
+
124
+ pipeline = AutoPipelineForText2Image.from_pretrained(
125
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
126
+ ).to("cuda")
127
+ pipeline.unet.load_attn_procs(
128
+ "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
129
+ )
130
+ ```
131
+ """
132
+ from ..models.attention_processor import CustomDiffusionAttnProcessor
133
+ from ..models.lora import LoRACompatibleConv, LoRACompatibleLinear, LoRAConv2dLayer, LoRALinearLayer
134
+
135
+ cache_dir = kwargs.pop("cache_dir", None)
136
+ force_download = kwargs.pop("force_download", False)
137
+ resume_download = kwargs.pop("resume_download", False)
138
+ proxies = kwargs.pop("proxies", None)
139
+ local_files_only = kwargs.pop("local_files_only", None)
140
+ token = kwargs.pop("token", None)
141
+ revision = kwargs.pop("revision", None)
142
+ subfolder = kwargs.pop("subfolder", None)
143
+ weight_name = kwargs.pop("weight_name", None)
144
+ use_safetensors = kwargs.pop("use_safetensors", None)
145
+ low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
146
+ # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.
147
+ # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
148
+ network_alphas = kwargs.pop("network_alphas", None)
149
+
150
+ _pipeline = kwargs.pop("_pipeline", None)
151
+
152
+ is_network_alphas_none = network_alphas is None
153
+
154
+ allow_pickle = False
155
+
156
+ if use_safetensors is None:
157
+ use_safetensors = True
158
+ allow_pickle = True
159
+
160
+ user_agent = {
161
+ "file_type": "attn_procs_weights",
162
+ "framework": "pytorch",
163
+ }
164
+
165
+ if low_cpu_mem_usage and not is_accelerate_available():
166
+ low_cpu_mem_usage = False
167
+ logger.warning(
168
+ "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
169
+ " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
170
+ " `accelerate` for faster and less memory-intensive model loading. You can do so with: \n```\npip"
171
+ " install accelerate\n```\n."
172
+ )
173
+
174
+ model_file = None
175
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
176
+ # Let's first try to load .safetensors weights
177
+ if (use_safetensors and weight_name is None) or (
178
+ weight_name is not None and weight_name.endswith(".safetensors")
179
+ ):
180
+ try:
181
+ model_file = _get_model_file(
182
+ pretrained_model_name_or_path_or_dict,
183
+ weights_name=weight_name or LORA_WEIGHT_NAME_SAFE,
184
+ cache_dir=cache_dir,
185
+ force_download=force_download,
186
+ resume_download=resume_download,
187
+ proxies=proxies,
188
+ local_files_only=local_files_only,
189
+ token=token,
190
+ revision=revision,
191
+ subfolder=subfolder,
192
+ user_agent=user_agent,
193
+ )
194
+ state_dict = safetensors.torch.load_file(model_file, device="cpu")
195
+ except IOError as e:
196
+ if not allow_pickle:
197
+ raise e
198
+ # try loading non-safetensors weights
199
+ pass
200
+ if model_file is None:
201
+ model_file = _get_model_file(
202
+ pretrained_model_name_or_path_or_dict,
203
+ weights_name=weight_name or LORA_WEIGHT_NAME,
204
+ cache_dir=cache_dir,
205
+ force_download=force_download,
206
+ resume_download=resume_download,
207
+ proxies=proxies,
208
+ local_files_only=local_files_only,
209
+ token=token,
210
+ revision=revision,
211
+ subfolder=subfolder,
212
+ user_agent=user_agent,
213
+ )
214
+ state_dict = torch.load(model_file, map_location="cpu")
215
+ else:
216
+ state_dict = pretrained_model_name_or_path_or_dict
217
+
218
+ # fill attn processors
219
+ lora_layers_list = []
220
+
221
+ is_lora = all(("lora" in k or k.endswith(".alpha")) for k in state_dict.keys()) and not USE_PEFT_BACKEND
222
+ is_custom_diffusion = any("custom_diffusion" in k for k in state_dict.keys())
223
+
224
+ if is_lora:
225
+ # correct keys
226
+ state_dict, network_alphas = self.convert_state_dict_legacy_attn_format(state_dict, network_alphas)
227
+
228
+ if network_alphas is not None:
229
+ network_alphas_keys = list(network_alphas.keys())
230
+ used_network_alphas_keys = set()
231
+
232
+ lora_grouped_dict = defaultdict(dict)
233
+ mapped_network_alphas = {}
234
+
235
+ all_keys = list(state_dict.keys())
236
+ for key in all_keys:
237
+ value = state_dict.pop(key)
238
+ attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:])
239
+ lora_grouped_dict[attn_processor_key][sub_key] = value
240
+
241
+ # Create another `mapped_network_alphas` dictionary so that we can properly map them.
242
+ if network_alphas is not None:
243
+ for k in network_alphas_keys:
244
+ if k.replace(".alpha", "") in key:
245
+ mapped_network_alphas.update({attn_processor_key: network_alphas.get(k)})
246
+ used_network_alphas_keys.add(k)
247
+
248
+ if not is_network_alphas_none:
249
+ if len(set(network_alphas_keys) - used_network_alphas_keys) > 0:
250
+ raise ValueError(
251
+ f"The `network_alphas` has to be empty at this point but has the following keys \n\n {', '.join(network_alphas.keys())}"
252
+ )
253
+
254
+ if len(state_dict) > 0:
255
+ raise ValueError(
256
+ f"The `state_dict` has to be empty at this point but has the following keys \n\n {', '.join(state_dict.keys())}"
257
+ )
258
+
259
+ for key, value_dict in lora_grouped_dict.items():
260
+ attn_processor = self
261
+ for sub_key in key.split("."):
262
+ attn_processor = getattr(attn_processor, sub_key)
263
+
264
+ # Process non-attention layers, which don't have to_{k,v,q,out_proj}_lora layers
265
+ # or add_{k,v,q,out_proj}_proj_lora layers.
266
+ rank = value_dict["lora.down.weight"].shape[0]
267
+
268
+ if isinstance(attn_processor, LoRACompatibleConv):
269
+ in_features = attn_processor.in_channels
270
+ out_features = attn_processor.out_channels
271
+ kernel_size = attn_processor.kernel_size
272
+
273
+ ctx = init_empty_weights if low_cpu_mem_usage else nullcontext
274
+ with ctx():
275
+ lora = LoRAConv2dLayer(
276
+ in_features=in_features,
277
+ out_features=out_features,
278
+ rank=rank,
279
+ kernel_size=kernel_size,
280
+ stride=attn_processor.stride,
281
+ padding=attn_processor.padding,
282
+ network_alpha=mapped_network_alphas.get(key),
283
+ )
284
+ elif isinstance(attn_processor, LoRACompatibleLinear):
285
+ ctx = init_empty_weights if low_cpu_mem_usage else nullcontext
286
+ with ctx():
287
+ lora = LoRALinearLayer(
288
+ attn_processor.in_features,
289
+ attn_processor.out_features,
290
+ rank,
291
+ mapped_network_alphas.get(key),
292
+ )
293
+ else:
294
+ raise ValueError(f"Module {key} is not a LoRACompatibleConv or LoRACompatibleLinear module.")
295
+
296
+ value_dict = {k.replace("lora.", ""): v for k, v in value_dict.items()}
297
+ lora_layers_list.append((attn_processor, lora))
298
+
299
+ if low_cpu_mem_usage:
300
+ device = next(iter(value_dict.values())).device
301
+ dtype = next(iter(value_dict.values())).dtype
302
+ load_model_dict_into_meta(lora, value_dict, device=device, dtype=dtype)
303
+ else:
304
+ lora.load_state_dict(value_dict)
305
+
306
+ elif is_custom_diffusion:
307
+ attn_processors = {}
308
+ custom_diffusion_grouped_dict = defaultdict(dict)
309
+ for key, value in state_dict.items():
310
+ if len(value) == 0:
311
+ custom_diffusion_grouped_dict[key] = {}
312
+ else:
313
+ if "to_out" in key:
314
+ attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:])
315
+ else:
316
+ attn_processor_key, sub_key = ".".join(key.split(".")[:-2]), ".".join(key.split(".")[-2:])
317
+ custom_diffusion_grouped_dict[attn_processor_key][sub_key] = value
318
+
319
+ for key, value_dict in custom_diffusion_grouped_dict.items():
320
+ if len(value_dict) == 0:
321
+ attn_processors[key] = CustomDiffusionAttnProcessor(
322
+ train_kv=False, train_q_out=False, hidden_size=None, cross_attention_dim=None
323
+ )
324
+ else:
325
+ cross_attention_dim = value_dict["to_k_custom_diffusion.weight"].shape[1]
326
+ hidden_size = value_dict["to_k_custom_diffusion.weight"].shape[0]
327
+ train_q_out = True if "to_q_custom_diffusion.weight" in value_dict else False
328
+ attn_processors[key] = CustomDiffusionAttnProcessor(
329
+ train_kv=True,
330
+ train_q_out=train_q_out,
331
+ hidden_size=hidden_size,
332
+ cross_attention_dim=cross_attention_dim,
333
+ )
334
+ attn_processors[key].load_state_dict(value_dict)
335
+ elif USE_PEFT_BACKEND:
336
+ # In that case we have nothing to do as loading the adapter weights is already handled above by `set_peft_model_state_dict`
337
+ # on the Unet
338
+ pass
339
+ else:
340
+ raise ValueError(
341
+ f"{model_file} does not seem to be in the correct format expected by LoRA or Custom Diffusion training."
342
+ )
343
+
344
+ # <Unsafe code
345
+ # We can be sure that the following works as it just sets attention processors, lora layers and puts all in the same dtype
346
+ # Now we remove any existing hooks to
347
+ is_model_cpu_offload = False
348
+ is_sequential_cpu_offload = False
349
+
350
+ # For the PEFT backend the UNet is already offloaded at this stage as it is handled inside `load_lora_into_unet`
351
+ if not USE_PEFT_BACKEND:
352
+ if _pipeline is not None:
353
+ for _, component in _pipeline.components.items():
354
+ if isinstance(component, nn.Module) and hasattr(component, "_hf_hook"):
355
+ is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
356
+ is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)
357
+
358
+ logger.info(
359
+ "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will first be removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
360
+ )
361
+ remove_hook_from_module(component, recurse=is_sequential_cpu_offload)
362
+
363
+ # only custom diffusion needs to set attn processors
364
+ if is_custom_diffusion:
365
+ self.set_attn_processor(attn_processors)
366
+
367
+ # set lora layers
368
+ for target_module, lora_layer in lora_layers_list:
369
+ target_module.set_lora_layer(lora_layer)
370
+
371
+ self.to(dtype=self.dtype, device=self.device)
372
+
373
+ # Offload back.
374
+ if is_model_cpu_offload:
375
+ _pipeline.enable_model_cpu_offload()
376
+ elif is_sequential_cpu_offload:
377
+ _pipeline.enable_sequential_cpu_offload()
378
+ # Unsafe code />
379
+
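As the docstring above notes, `load_attn_procs` accepts a Hub repo id, a local directory, or an already-loaded state dict. A minimal sketch of the state-dict branch (the file path is illustrative, not part of this commit):

```py
import safetensors.torch
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Load the LoRA weights yourself and hand the dict to the UNet; this skips
# the download/_get_model_file logic and goes straight to the key grouping.
state_dict = safetensors.torch.load_file("path/to/pytorch_lora_weights.safetensors", device="cpu")
pipe.unet.load_attn_procs(state_dict)
```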
380
+ def convert_state_dict_legacy_attn_format(self, state_dict, network_alphas):
381
+ is_new_lora_format = all(
382
+ key.startswith(self.unet_name) or key.startswith(self.text_encoder_name) for key in state_dict.keys()
383
+ )
384
+ if is_new_lora_format:
385
+ # Strip the `"unet"` prefix.
386
+ is_text_encoder_present = any(key.startswith(self.text_encoder_name) for key in state_dict.keys())
387
+ if is_text_encoder_present:
388
+ warn_message = "The state_dict contains LoRA params corresponding to the text encoder which are not being used here. To use both UNet and text encoder related LoRA params, use [`pipe.load_lora_weights()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.load_lora_weights)."
389
+ logger.warning(warn_message)
390
+ unet_keys = [k for k in state_dict.keys() if k.startswith(self.unet_name)]
391
+ state_dict = {k.replace(f"{self.unet_name}.", ""): v for k, v in state_dict.items() if k in unet_keys}
392
+
393
+ # change processor format to 'pure' LoRACompatibleLinear format
394
+ if any("processor" in k.split(".") for k in state_dict.keys()):
395
+
396
+ def format_to_lora_compatible(key):
397
+ if "processor" not in key.split("."):
398
+ return key
399
+ return key.replace(".processor", "").replace("to_out_lora", "to_out.0.lora").replace("_lora", ".lora")
400
+
401
+ state_dict = {format_to_lora_compatible(k): v for k, v in state_dict.items()}
402
+
403
+ if network_alphas is not None:
404
+ network_alphas = {format_to_lora_compatible(k): v for k, v in network_alphas.items()}
405
+ return state_dict, network_alphas
406
+
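To make the conversion above concrete, here is a hypothetical legacy "processor"-format key and the `LoRACompatibleLinear`-style key it is rewritten to (the key itself is only an illustrative example):

```py
old_key = "mid_block.attentions.0.transformer_blocks.0.attn1.processor.to_out_lora.down.weight"
new_key = (
    old_key.replace(".processor", "")
    .replace("to_out_lora", "to_out.0.lora")
    .replace("_lora", ".lora")
)
# new_key == "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.lora.down.weight"
```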
407
+ def save_attn_procs(
408
+ self,
409
+ save_directory: Union[str, os.PathLike],
410
+ is_main_process: bool = True,
411
+ weight_name: str = None,
412
+ save_function: Callable = None,
413
+ safe_serialization: bool = True,
414
+ **kwargs,
415
+ ):
416
+ r"""
417
+ Save attention processor layers to a directory so that they can be reloaded with the
418
+ [`~loaders.UNet2DConditionLoadersMixin.load_attn_procs`] method.
419
+
420
+ Arguments:
421
+ save_directory (`str` or `os.PathLike`):
422
+ Directory to save an attention processor to (will be created if it doesn't exist).
423
+ is_main_process (`bool`, *optional*, defaults to `True`):
424
+ Whether the process calling this is the main process or not. Useful during distributed training when you
425
+ need to call this function on all processes. In this case, set `is_main_process=True` only on the main
426
+ process to avoid race conditions.
427
+ save_function (`Callable`):
428
+ The function to use to save the state dictionary. Useful during distributed training when you need to
429
+ replace `torch.save` with another method. Can be configured with the environment variable
430
+ `DIFFUSERS_SAVE_MODE`.
431
+ safe_serialization (`bool`, *optional*, defaults to `True`):
432
+ Whether to save the model using `safetensors` or with `pickle`.
433
+
434
+ Example:
435
+
436
+ ```py
437
+ import torch
438
+ from diffusers import DiffusionPipeline
439
+
440
+ pipeline = DiffusionPipeline.from_pretrained(
441
+ "CompVis/stable-diffusion-v1-4",
442
+ torch_dtype=torch.float16,
443
+ ).to("cuda")
444
+ pipeline.unet.load_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin")
445
+ pipeline.unet.save_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin")
446
+ ```
447
+ """
448
+ from ..models.attention_processor import (
449
+ CustomDiffusionAttnProcessor,
450
+ CustomDiffusionAttnProcessor2_0,
451
+ CustomDiffusionXFormersAttnProcessor,
452
+ )
453
+
454
+ if os.path.isfile(save_directory):
455
+ logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
456
+ return
457
+
458
+ if save_function is None:
459
+ if safe_serialization:
460
+
461
+ def save_function(weights, filename):
462
+ return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"})
463
+
464
+ else:
465
+ save_function = torch.save
466
+
467
+ os.makedirs(save_directory, exist_ok=True)
468
+
469
+ is_custom_diffusion = any(
470
+ isinstance(
471
+ x,
472
+ (CustomDiffusionAttnProcessor, CustomDiffusionAttnProcessor2_0, CustomDiffusionXFormersAttnProcessor),
473
+ )
474
+ for (_, x) in self.attn_processors.items()
475
+ )
476
+ if is_custom_diffusion:
477
+ model_to_save = AttnProcsLayers(
478
+ {
479
+ y: x
480
+ for (y, x) in self.attn_processors.items()
481
+ if isinstance(
482
+ x,
483
+ (
484
+ CustomDiffusionAttnProcessor,
485
+ CustomDiffusionAttnProcessor2_0,
486
+ CustomDiffusionXFormersAttnProcessor,
487
+ ),
488
+ )
489
+ }
490
+ )
491
+ state_dict = model_to_save.state_dict()
492
+ for name, attn in self.attn_processors.items():
493
+ if len(attn.state_dict()) == 0:
494
+ state_dict[name] = {}
495
+ else:
496
+ model_to_save = AttnProcsLayers(self.attn_processors)
497
+ state_dict = model_to_save.state_dict()
498
+
499
+ if weight_name is None:
500
+ if safe_serialization:
501
+ weight_name = CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE if is_custom_diffusion else LORA_WEIGHT_NAME_SAFE
502
+ else:
503
+ weight_name = CUSTOM_DIFFUSION_WEIGHT_NAME if is_custom_diffusion else LORA_WEIGHT_NAME
504
+
505
+ # Save the model
506
+ save_function(state_dict, os.path.join(save_directory, weight_name))
507
+ logger.info(f"Model weights saved in {os.path.join(save_directory, weight_name)}")
508
+
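A short, hedged round-trip sketch for `save_attn_procs`; the directory name is illustrative and the default safetensors LoRA file name is used:

```py
# Save the current attention-processor / LoRA layers of the UNet ...
pipe.unet.save_attn_procs("my_attn_procs", safe_serialization=True)

# ... and reload them later (file name defaults to pytorch_lora_weights.safetensors).
pipe.unet.load_attn_procs("my_attn_procs")
```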
509
+ def fuse_lora(self, lora_scale=1.0, safe_fusing=False, adapter_names=None):
510
+ self.lora_scale = lora_scale
511
+ self._safe_fusing = safe_fusing
512
+ self.apply(partial(self._fuse_lora_apply, adapter_names=adapter_names))
513
+
514
+ def _fuse_lora_apply(self, module, adapter_names=None):
515
+ if not USE_PEFT_BACKEND:
516
+ if hasattr(module, "_fuse_lora"):
517
+ module._fuse_lora(self.lora_scale, self._safe_fusing)
518
+
519
+ if adapter_names is not None:
520
+ raise ValueError(
521
+ "The `adapter_names` argument is not supported in your environment. Please switch"
522
+ " to PEFT backend to use this argument by installing latest PEFT and transformers."
523
+ " `pip install -U peft transformers`"
524
+ )
525
+ else:
526
+ from peft.tuners.tuners_utils import BaseTunerLayer
527
+
528
+ merge_kwargs = {"safe_merge": self._safe_fusing}
529
+
530
+ if isinstance(module, BaseTunerLayer):
531
+ if self.lora_scale != 1.0:
532
+ module.scale_layer(self.lora_scale)
533
+
534
+ # For BC with previous PEFT versions, we need to check the signature
535
+ # of the `merge` method to see if it supports the `adapter_names` argument.
536
+ supported_merge_kwargs = list(inspect.signature(module.merge).parameters)
537
+ if "adapter_names" in supported_merge_kwargs:
538
+ merge_kwargs["adapter_names"] = adapter_names
539
+ elif "adapter_names" not in supported_merge_kwargs and adapter_names is not None:
540
+ raise ValueError(
541
+ "The `adapter_names` argument is not supported with your PEFT version. Please upgrade"
542
+ " to the latest version of PEFT. `pip install -U peft`"
543
+ )
544
+
545
+ module.merge(**merge_kwargs)
546
+
547
+ def unfuse_lora(self):
548
+ self.apply(self._unfuse_lora_apply)
549
+
550
+ def _unfuse_lora_apply(self, module):
551
+ if not USE_PEFT_BACKEND:
552
+ if hasattr(module, "_unfuse_lora"):
553
+ module._unfuse_lora()
554
+ else:
555
+ from peft.tuners.tuners_utils import BaseTunerLayer
556
+
557
+ if isinstance(module, BaseTunerLayer):
558
+ module.unmerge()
559
+
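Typical use of the fuse/unfuse pair defined above: fusing folds the LoRA deltas into the base UNet weights for slightly faster inference, and unfusing restores the originals. The repo id and prompt are illustrative:

```py
pipe.load_lora_weights("some-user/some-lora", weight_name="pytorch_lora_weights.safetensors")

pipe.unet.fuse_lora(lora_scale=0.7)  # merge the LoRA weights at 0.7 strength
image = pipe("an astronaut riding a horse").images[0]
pipe.unet.unfuse_lora()  # undo the merge, back to the base weights
```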
560
+ def set_adapters(
561
+ self,
562
+ adapter_names: Union[List[str], str],
563
+ weights: Optional[Union[List[float], float]] = None,
564
+ ):
565
+ """
566
+ Set the currently active adapters for use in the UNet.
567
+
568
+ Args:
569
+ adapter_names (`List[str]` or `str`):
570
+ The names of the adapters to use.
571
+ weights (`Union[List[float], float]`, *optional*):
572
+ The adapter(s) weights to use with the UNet. If `None`, the weights are set to `1.0` for all the
573
+ adapters.
574
+
575
+ Example:
576
+
577
+ ```py
578
+ from diffusers import AutoPipelineForText2Image
579
+ import torch
580
+
581
+ pipeline = AutoPipelineForText2Image.from_pretrained(
582
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
583
+ ).to("cuda")
584
+ pipeline.load_lora_weights(
585
+ "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
586
+ )
587
+ pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
588
+ pipeline.set_adapters(["cinematic", "pixel"], adapter_weights=[0.5, 0.5])
589
+ ```
590
+ """
591
+ if not USE_PEFT_BACKEND:
592
+ raise ValueError("PEFT backend is required for `set_adapters()`.")
593
+
594
+ adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names
595
+
596
+ if weights is None:
597
+ weights = [1.0] * len(adapter_names)
598
+ elif isinstance(weights, float):
599
+ weights = [weights] * len(adapter_names)
600
+
601
+ if len(adapter_names) != len(weights):
602
+ raise ValueError(
603
+ f"Length of adapter names {len(adapter_names)} is not equal to the length of their weights {len(weights)}."
604
+ )
605
+
606
+ set_weights_and_activate_adapters(self, adapter_names, weights)
607
+
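Note that the docstring example above uses the pipeline-level `set_adapters`; this UNet-level method can also be called directly, in which case the argument is named `weights` (the adapter names and values below are illustrative):

```py
pipe.unet.set_adapters(["cinematic", "pixel"], weights=[0.5, 0.5])
```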
608
+ def disable_lora(self):
609
+ """
610
+ Disable the UNet's active LoRA layers.
611
+
612
+ Example:
613
+
614
+ ```py
615
+ from diffusers import AutoPipelineForText2Image
616
+ import torch
617
+
618
+ pipeline = AutoPipelineForText2Image.from_pretrained(
619
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
620
+ ).to("cuda")
621
+ pipeline.load_lora_weights(
622
+ "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
623
+ )
624
+ pipeline.disable_lora()
625
+ ```
626
+ """
627
+ if not USE_PEFT_BACKEND:
628
+ raise ValueError("PEFT backend is required for this method.")
629
+ set_adapter_layers(self, enabled=False)
630
+
631
+ def enable_lora(self):
632
+ """
633
+ Enable the UNet's active LoRA layers.
634
+
635
+ Example:
636
+
637
+ ```py
638
+ from diffusers import AutoPipelineForText2Image
639
+ import torch
640
+
641
+ pipeline = AutoPipelineForText2Image.from_pretrained(
642
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
643
+ ).to("cuda")
644
+ pipeline.load_lora_weights(
645
+ "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
646
+ )
647
+ pipeline.enable_lora()
648
+ ```
649
+ """
650
+ if not USE_PEFT_BACKEND:
651
+ raise ValueError("PEFT backend is required for this method.")
652
+ set_adapter_layers(self, enabled=True)
653
+
654
+ def delete_adapters(self, adapter_names: Union[List[str], str]):
655
+ """
656
+ Delete an adapter's LoRA layers from the UNet.
657
+
658
+ Args:
659
+ adapter_names (`Union[List[str], str]`):
660
+ The names (single string or list of strings) of the adapter(s) to delete.
661
+
662
+ Example:
663
+
664
+ ```py
665
+ from diffusers import AutoPipelineForText2Image
666
+ import torch
667
+
668
+ pipeline = AutoPipelineForText2Image.from_pretrained(
669
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
670
+ ).to("cuda")
671
+ pipeline.load_lora_weights(
672
+ "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
673
+ )
674
+ pipeline.delete_adapters("cinematic")
675
+ ```
676
+ """
677
+ if not USE_PEFT_BACKEND:
678
+ raise ValueError("PEFT backend is required for this method.")
679
+
680
+ if isinstance(adapter_names, str):
681
+ adapter_names = [adapter_names]
682
+
683
+ for adapter_name in adapter_names:
684
+ delete_adapter_layers(self, adapter_name)
685
+
686
+ # Pop also the corresponding adapter from the config
687
+ if hasattr(self, "peft_config"):
688
+ self.peft_config.pop(adapter_name, None)
689
+
690
+ def _convert_ip_adapter_image_proj_to_diffusers(self, state_dict):
691
+ updated_state_dict = {}
692
+ image_projection = None
693
+
694
+ if "proj.weight" in state_dict:
695
+ # IP-Adapter
696
+ num_image_text_embeds = 4
697
+ clip_embeddings_dim = state_dict["proj.weight"].shape[-1]
698
+ cross_attention_dim = state_dict["proj.weight"].shape[0] // 4
699
+
700
+ image_projection = ImageProjection(
701
+ cross_attention_dim=cross_attention_dim,
702
+ image_embed_dim=clip_embeddings_dim,
703
+ num_image_text_embeds=num_image_text_embeds,
704
+ )
705
+
706
+ for key, value in state_dict.items():
707
+ diffusers_name = key.replace("proj", "image_embeds")
708
+ updated_state_dict[diffusers_name] = value
709
+
710
+ elif "proj.3.weight" in state_dict:
711
+ # IP-Adapter Full
712
+ clip_embeddings_dim = state_dict["proj.0.weight"].shape[0]
713
+ cross_attention_dim = state_dict["proj.3.weight"].shape[0]
714
+
715
+ image_projection = IPAdapterFullImageProjection(
716
+ cross_attention_dim=cross_attention_dim, image_embed_dim=clip_embeddings_dim
717
+ )
718
+
719
+ for key, value in state_dict.items():
720
+ diffusers_name = key.replace("proj.0", "ff.net.0.proj")
721
+ diffusers_name = diffusers_name.replace("proj.2", "ff.net.2")
722
+ diffusers_name = diffusers_name.replace("proj.3", "norm")
723
+ updated_state_dict[diffusers_name] = value
724
+
725
+ else:
726
+ # IP-Adapter Plus
727
+ num_image_text_embeds = state_dict["latents"].shape[1]
728
+ embed_dims = state_dict["proj_in.weight"].shape[1]
729
+ output_dims = state_dict["proj_out.weight"].shape[0]
730
+ hidden_dims = state_dict["latents"].shape[2]
731
+ heads = state_dict["layers.0.0.to_q.weight"].shape[0] // 64
732
+
733
+ image_projection = IPAdapterPlusImageProjection(
734
+ embed_dims=embed_dims,
735
+ output_dims=output_dims,
736
+ hidden_dims=hidden_dims,
737
+ heads=heads,
738
+ num_queries=num_image_text_embeds,
739
+ )
740
+
741
+ for key, value in state_dict.items():
742
+ diffusers_name = key.replace("0.to", "2.to")
743
+ diffusers_name = diffusers_name.replace("1.0.weight", "3.0.weight")
744
+ diffusers_name = diffusers_name.replace("1.0.bias", "3.0.bias")
745
+ diffusers_name = diffusers_name.replace("1.1.weight", "3.1.net.0.proj.weight")
746
+ diffusers_name = diffusers_name.replace("1.3.weight", "3.1.net.2.weight")
747
+
748
+ if "norm1" in diffusers_name:
749
+ updated_state_dict[diffusers_name.replace("0.norm1", "0")] = value
750
+ elif "norm2" in diffusers_name:
751
+ updated_state_dict[diffusers_name.replace("0.norm2", "1")] = value
752
+ elif "to_kv" in diffusers_name:
753
+ v_chunk = value.chunk(2, dim=0)
754
+ updated_state_dict[diffusers_name.replace("to_kv", "to_k")] = v_chunk[0]
755
+ updated_state_dict[diffusers_name.replace("to_kv", "to_v")] = v_chunk[1]
756
+ elif "to_out" in diffusers_name:
757
+ updated_state_dict[diffusers_name.replace("to_out", "to_out.0")] = value
758
+ else:
759
+ updated_state_dict[diffusers_name] = value
760
+
761
+ image_projection.load_state_dict(updated_state_dict)
762
+ return image_projection
763
+
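The branching above infers the projection variant from the keys of the `image_proj` state dict. A small helper capturing the same checks (the helper and its name are ours, not part of this commit):

```py
def guess_ip_adapter_variant(image_proj_state_dict):
    """Mirror the key checks used in _convert_ip_adapter_image_proj_to_diffusers."""
    if "proj.weight" in image_proj_state_dict:
        return "ip-adapter"            # plain linear ImageProjection
    if "proj.3.weight" in image_proj_state_dict:
        return "ip-adapter-full-face"  # MLP-style IPAdapterFullImageProjection
    return "ip-adapter-plus"           # resampler-style IPAdapterPlusImageProjection
```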
764
+ def _load_ip_adapter_weights(self, state_dict):
765
+ from ..models.attention_processor import (
766
+ AttnProcessor,
767
+ AttnProcessor2_0,
768
+ IPAdapterAttnProcessor,
769
+ IPAdapterAttnProcessor2_0,
770
+ )
771
+
772
+ if "proj.weight" in state_dict["image_proj"]:
773
+ # IP-Adapter
774
+ num_image_text_embeds = 4
775
+ elif "proj.3.weight" in state_dict["image_proj"]:
776
+ # IP-Adapter Full Face
777
+ num_image_text_embeds = 257 # 256 CLIP tokens + 1 CLS token
778
+ else:
779
+ # IP-Adapter Plus
780
+ num_image_text_embeds = state_dict["image_proj"]["latents"].shape[1]
781
+
782
+ # Set encoder_hid_proj after loading ip_adapter weights,
783
+ # because `IPAdapterPlusImageProjection` also has `attn_processors`.
784
+ self.encoder_hid_proj = None
785
+
786
+ # set ip-adapter cross-attention processors & load state_dict
787
+ attn_procs = {}
788
+ key_id = 1
789
+ for name in self.attn_processors.keys():
790
+ cross_attention_dim = None if name.endswith("attn1.processor") else self.config.cross_attention_dim
791
+ if name.startswith("mid_block"):
792
+ hidden_size = self.config.block_out_channels[-1]
793
+ elif name.startswith("up_blocks"):
794
+ block_id = int(name[len("up_blocks.")])
795
+ hidden_size = list(reversed(self.config.block_out_channels))[block_id]
796
+ elif name.startswith("down_blocks"):
797
+ block_id = int(name[len("down_blocks.")])
798
+ hidden_size = self.config.block_out_channels[block_id]
799
+ if cross_attention_dim is None or "motion_modules" in name:
800
+ attn_processor_class = (
801
+ AttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else AttnProcessor
802
+ )
803
+ attn_procs[name] = attn_processor_class()
804
+ else:
805
+ attn_processor_class = (
806
+ IPAdapterAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else IPAdapterAttnProcessor
807
+ )
808
+ attn_procs[name] = attn_processor_class(
809
+ hidden_size=hidden_size,
810
+ cross_attention_dim=cross_attention_dim,
811
+ scale=1.0,
812
+ num_tokens=num_image_text_embeds,
813
+ ).to(dtype=self.dtype, device=self.device)
814
+
815
+ value_dict = {}
816
+ for k, w in attn_procs[name].state_dict().items():
817
+ value_dict.update({f"{k}": state_dict["ip_adapter"][f"{key_id}.{k}"]})
818
+
819
+ attn_procs[name].load_state_dict(value_dict)
820
+ key_id += 2
821
+
822
+ self.set_attn_processor(attn_procs)
823
+
824
+ # convert IP-Adapter Image Projection layers to diffusers
825
+ image_projection = self._convert_ip_adapter_image_proj_to_diffusers(state_dict["image_proj"])
826
+
827
+ self.encoder_hid_proj = image_projection.to(device=self.device, dtype=self.dtype)
828
+ self.config.encoder_hid_dim_type = "ip_image_proj"
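`_load_ip_adapter_weights` is normally reached through the pipeline-level `load_ip_adapter` entry point; a typical call looks roughly like the sketch below (the repo, subfolder, and weight file follow the common IP-Adapter layout and are assumptions here, as is `reference_image`):

```py
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
pipe.set_ip_adapter_scale(0.6)

# reference_image: a PIL.Image used as the IP-Adapter image prompt
image = pipe("best quality, high quality", ip_adapter_image=reference_image).images[0]
```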
diffusers/loaders/utils.py ADDED
@@ -0,0 +1,59 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Dict
16
+
17
+ import torch
18
+
19
+
20
+ class AttnProcsLayers(torch.nn.Module):
21
+ def __init__(self, state_dict: Dict[str, torch.Tensor]):
22
+ super().__init__()
23
+ self.layers = torch.nn.ModuleList(state_dict.values())
24
+ self.mapping = dict(enumerate(state_dict.keys()))
25
+ self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())}
26
+
27
+ # .processor for unet, .self_attn for text encoder
28
+ self.split_keys = [".processor", ".self_attn"]
29
+
30
+ # we add a hook to state_dict() and load_state_dict() so that the
31
+ # naming fits with `unet.attn_processors`
32
+ def map_to(module, state_dict, *args, **kwargs):
33
+ new_state_dict = {}
34
+ for key, value in state_dict.items():
35
+ num = int(key.split(".")[1]) # 0 is always "layers"
36
+ new_key = key.replace(f"layers.{num}", module.mapping[num])
37
+ new_state_dict[new_key] = value
38
+
39
+ return new_state_dict
40
+
41
+ def remap_key(key, state_dict):
42
+ for k in self.split_keys:
43
+ if k in key:
44
+ return key.split(k)[0] + k
45
+
46
+ raise ValueError(
47
+ f"There seems to be a problem with the state_dict: {set(state_dict.keys())}. {key} has to have one of {self.split_keys}."
48
+ )
49
+
50
+ def map_from(module, state_dict, *args, **kwargs):
51
+ all_keys = list(state_dict.keys())
52
+ for key in all_keys:
53
+ replace_key = remap_key(key, state_dict)
54
+ new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}")
55
+ state_dict[new_key] = state_dict[key]
56
+ del state_dict[key]
57
+
58
+ self._register_state_dict_hook(map_to)
59
+ self._register_load_state_dict_pre_hook(map_from, with_module=True)
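A small illustration of the two hooks registered above: parameters are stored under `layers.N.*` internally, but `state_dict()` re-exposes the original attention-processor names. The processor key and the `nn.Linear` stand-in below are hypothetical:

```py
import torch

from diffusers.loaders.utils import AttnProcsLayers  # the class defined above

procs = {"down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor": torch.nn.Linear(4, 4)}
wrapper = AttnProcsLayers(procs)

print(list(wrapper.state_dict().keys()))
# ['down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.weight',
#  'down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.bias']
```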
diffusers/models/README.md ADDED
@@ -0,0 +1,3 @@
1
+ # Models
2
+
3
+ For more detail on the models, please refer to the [docs](https://huggingface.co/docs/diffusers/api/models/overview).
diffusers/models/__init__.py ADDED
@@ -0,0 +1,97 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ..utils import (
18
+ DIFFUSERS_SLOW_IMPORT,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
+ _import_structure = {}
26
+
27
+ if is_torch_available():
28
+ _import_structure["adapter"] = ["MultiAdapter", "T2IAdapter"]
29
+ _import_structure["autoencoders.autoencoder_asym_kl"] = ["AsymmetricAutoencoderKL"]
30
+ _import_structure["autoencoders.autoencoder_kl"] = ["AutoencoderKL"]
31
+ _import_structure["autoencoders.autoencoder_kl_temporal_decoder"] = ["AutoencoderKLTemporalDecoder"]
32
+ _import_structure["autoencoders.autoencoder_tiny"] = ["AutoencoderTiny"]
33
+ _import_structure["autoencoders.consistency_decoder_vae"] = ["ConsistencyDecoderVAE"]
34
+ _import_structure["controlnet"] = ["ControlNetModel"]
35
+ _import_structure["dual_transformer_2d"] = ["DualTransformer2DModel"]
36
+ _import_structure["embeddings"] = ["ImageProjection"]
37
+ _import_structure["modeling_utils"] = ["ModelMixin"]
38
+ _import_structure["prior_transformer"] = ["PriorTransformer"]
39
+ _import_structure["t5_film_transformer"] = ["T5FilmDecoder"]
40
+ _import_structure["transformer_2d"] = ["Transformer2DModel"]
41
+ _import_structure["transformer_temporal"] = ["TransformerTemporalModel"]
42
+ _import_structure["unets.unet_1d"] = ["UNet1DModel"]
43
+ _import_structure["unets.unet_2d"] = ["UNet2DModel"]
44
+ _import_structure["unets.unet_2d_condition"] = ["UNet2DConditionModel"]
45
+ _import_structure["unets.unet_3d_condition"] = ["UNet3DConditionModel"]
46
+ _import_structure["unets.unet_kandinsky3"] = ["Kandinsky3UNet"]
47
+ _import_structure["unets.unet_motion_model"] = ["MotionAdapter", "UNetMotionModel"]
48
+ _import_structure["unets.unet_spatio_temporal_condition"] = ["UNetSpatioTemporalConditionModel"]
49
+ _import_structure["unets.uvit_2d"] = ["UVit2DModel"]
50
+ _import_structure["vq_model"] = ["VQModel"]
51
+
52
+ if is_flax_available():
53
+ _import_structure["controlnet_flax"] = ["FlaxControlNetModel"]
54
+ _import_structure["unets.unet_2d_condition_flax"] = ["FlaxUNet2DConditionModel"]
55
+ _import_structure["vae_flax"] = ["FlaxAutoencoderKL"]
56
+
57
+
58
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
59
+ if is_torch_available():
60
+ from .adapter import MultiAdapter, T2IAdapter
61
+ from .autoencoders import (
62
+ AsymmetricAutoencoderKL,
63
+ AutoencoderKL,
64
+ AutoencoderKLTemporalDecoder,
65
+ AutoencoderTiny,
66
+ ConsistencyDecoderVAE,
67
+ )
68
+ from .controlnet import ControlNetModel
69
+ from .dual_transformer_2d import DualTransformer2DModel
70
+ from .embeddings import ImageProjection
71
+ from .modeling_utils import ModelMixin
72
+ from .prior_transformer import PriorTransformer
73
+ from .t5_film_transformer import T5FilmDecoder
74
+ from .transformer_2d import Transformer2DModel
75
+ from .transformer_temporal import TransformerTemporalModel
76
+ from .unets import (
77
+ Kandinsky3UNet,
78
+ MotionAdapter,
79
+ UNet1DModel,
80
+ UNet2DConditionModel,
81
+ UNet2DModel,
82
+ UNet3DConditionModel,
83
+ UNetMotionModel,
84
+ UNetSpatioTemporalConditionModel,
85
+ UVit2DModel,
86
+ )
87
+ from .vq_model import VQModel
88
+
89
+ if is_flax_available():
90
+ from .controlnet_flax import FlaxControlNetModel
91
+ from .unets import FlaxUNet2DConditionModel
92
+ from .vae_flax import FlaxAutoencoderKL
93
+
94
+ else:
95
+ import sys
96
+
97
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
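With the `_LazyModule` registration above, submodules listed in `_import_structure` are only imported on first attribute access, e.g.:

```py
import diffusers.models

# The lookup below triggers the actual import of models.unets.unet_2d_condition.
unet_cls = diffusers.models.UNet2DConditionModel
```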
diffusers/models/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (2.58 kB).
diffusers/models/__pycache__/activations.cpython-38.pyc ADDED
Binary file (4.5 kB).
diffusers/models/__pycache__/attention.cpython-38.pyc ADDED
Binary file (16 kB).
diffusers/models/__pycache__/attention_processor.cpython-38.pyc ADDED
Binary file (55.5 kB).
diffusers/models/__pycache__/downsampling.cpython-38.pyc ADDED
Binary file (10.3 kB).
diffusers/models/__pycache__/dual_transformer_2d.cpython-38.pyc ADDED
Binary file (5.92 kB).
diffusers/models/__pycache__/embeddings.cpython-38.pyc ADDED
Binary file (26.2 kB).
diffusers/models/__pycache__/lora.cpython-38.pyc ADDED
Binary file (12.2 kB).
diffusers/models/__pycache__/modeling_outputs.cpython-38.pyc ADDED
Binary file (823 Bytes).
diffusers/models/__pycache__/modeling_utils.cpython-38.pyc ADDED
Binary file (34.5 kB).
diffusers/models/__pycache__/normalization.cpython-38.pyc ADDED
Binary file (8.77 kB).
diffusers/models/__pycache__/resnet.cpython-38.pyc ADDED
Binary file (22.9 kB).