Update pipeline.py
pipeline.py +5 -5
pipeline.py CHANGED
@@ -1661,7 +1661,7 @@ class StableDiffusionXLControlNetPipeline(
                 device=device,
                 dtype=controlnet.dtype,
                 do_classifier_free_guidance=self.do_classifier_free_guidance,
-                do_perturbed_attention_guidance=self.
+                do_perturbed_attention_guidance=self.do_adversarial_guidance,
                 guess_mode=guess_mode,
             )
             height, width = image.shape[-2:]
@@ -1677,7 +1677,7 @@ class StableDiffusionXLControlNetPipeline(
                 device=device,
                 dtype=controlnet.dtype,
                 do_classifier_free_guidance=self.do_classifier_free_guidance,
-                do_perturbed_attention_guidance=self.
+                do_perturbed_attention_guidance=self.do_adversarial_guidance,
                 guess_mode=guess_mode,
             )
             images.append(image_)
@@ -1937,19 +1937,19 @@ class StableDiffusionXLControlNetPipeline(
                 return_dict=False,
             )

-            if guess_mode and self.do_classifier_free_guidance and not self.
+            if guess_mode and self.do_classifier_free_guidance and not self.do_adversarial_guidance:
                 # Inferred ControlNet only for the conditional batch.
                 # To apply the output of ControlNet to both the unconditional and conditional batches,
                 # add 0 to the unconditional batch to keep it unchanged.
                 down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
                 mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
-            elif guess_mode and not self.do_classifier_free_guidance and self.
+            elif guess_mode and not self.do_classifier_free_guidance and self.do_adversarial_guidance:
                 # Inferred ControlNet only for the conditional batch.
                 # To apply the output of ControlNet to both the unconditional and conditional batches,
                 # add 0 to the unconditional batch to keep it unchanged.
                 down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
                 mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
-            elif guess_mode and self.do_classifier_free_guidance and self.
+            elif guess_mode and self.do_classifier_free_guidance and self.do_adversarial_guidance:
                 raise ValueError(
                     "guess mode with both guidance is not supported."
                 )
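For orientation, here is a minimal sketch of why the forwarded do_perturbed_attention_guidance flag matters when preparing the ControlNet conditioning image: the image's batch dimension has to match however many guidance branches the denoiser sees. The helper below is an illustrative assumption, not the pipeline's actual prepare_image, and the repeat rule is only a guess at the intended behavior.

import torch

def prepare_control_image(image, do_classifier_free_guidance, do_perturbed_attention_guidance, guess_mode):
    # image: (batch, channels, height, width) conditioning tensor
    if guess_mode:
        # guess mode feeds ControlNet only the conditional branch, so no repeat
        return image
    # one extra copy per active guidance branch (unconditional and/or perturbed)
    repeats = 1 + int(do_classifier_free_guidance) + int(do_perturbed_attention_guidance)
    return torch.cat([image] * repeats)

control = torch.randn(1, 3, 1024, 1024)
print(prepare_control_image(control, True, False, False).shape)  # torch.Size([2, 3, 1024, 1024])
print(prepare_control_image(control, True, True, False).shape)   # torch.Size([3, 3, 1024, 1024])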
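And a minimal, self-contained sketch of the zero-padding trick used in the guess-mode branches above: ControlNet is inferred only for the conditional half of the batch, so zero residuals are prepended for the unconditional half, which leaves that half's UNet prediction unchanged. Shapes and values below are made up for illustration.

import torch

# Pretend ControlNet was run only on the conditional half of the batch.
down_block_res_samples = [torch.randn(1, 320, 64, 64), torch.randn(1, 640, 32, 32)]
mid_block_res_sample = torch.randn(1, 1280, 16, 16)

# Prepend zeros for the unconditional half so the residuals match the doubled
# UNet batch; adding zero residuals is a no-op for that half.
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])

print([tuple(d.shape) for d in down_block_res_samples])  # [(2, 320, 64, 64), (2, 640, 32, 32)]
print(tuple(mid_block_res_sample.shape))                 # (2, 1280, 16, 16)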